From 025c67586e4b3767ad8593a8918b6c1077fc2a60 Mon Sep 17 00:00:00 2001 From: Perfare Date: Sun, 5 Jul 2020 06:53:01 +0800 Subject: [PATCH] Initial --- .gitattributes | 2 + .gitignore | 14 + README.md | 17 +- build.gradle | 33 + gradle.properties | 19 + gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 54329 bytes gradle/wrapper/gradle-wrapper.properties | 5 + gradlew | 172 + gradlew.bat | 84 + module.gradle | 23 + module/.gitignore | 3 + module/build.gradle | 115 + module/src/main/AndroidManifest.xml | 1 + module/src/main/cpp/CMakeLists.txt | 29 + module/src/main/cpp/game.h | 36 + module/src/main/cpp/hook.cpp | 111 + module/src/main/cpp/hook.h | 29 + module/src/main/cpp/il2cpp-tabledefs.h | 152 + module/src/main/cpp/il2cpp.cpp | 446 + module/src/main/cpp/il2cpp.h | 20 + .../2017.1.0f3/il2cpp-api-functions.h | 263 + .../cpp/il2cppapi/2017.1.0f3/il2cpp-class.h | 1027 + .../2017.1.3f1/il2cpp-api-functions.h | 263 + .../cpp/il2cppapi/2017.1.3f1/il2cpp-class.h | 1028 + .../2017.2.0f3/il2cpp-api-functions.h | 268 + .../cpp/il2cppapi/2017.2.0f3/il2cpp-class.h | 1027 + .../2017.2.1f1/il2cpp-api-functions.h | 268 + .../cpp/il2cppapi/2017.2.1f1/il2cpp-class.h | 1028 + .../2018.1.0f2/il2cpp-api-functions.h | 251 + .../cpp/il2cppapi/2018.1.0f2/il2cpp-class.h | 1099 + .../2018.2.0f2/il2cpp-api-functions.h | 258 + .../cpp/il2cppapi/2018.2.0f2/il2cpp-class.h | 1131 + .../2018.3.0f2/il2cpp-api-functions.h | 274 + .../cpp/il2cppapi/2018.3.0f2/il2cpp-class.h | 1122 + .../2018.3.8f1/il2cpp-api-functions.h | 274 + .../cpp/il2cppapi/2018.3.8f1/il2cpp-class.h | 1123 + .../2018.4.18f1/il2cpp-api-functions.h | 274 + .../cpp/il2cppapi/2018.4.18f1/il2cpp-class.h | 1130 + .../2019.1.0f2/il2cpp-api-functions.h | 283 + .../cpp/il2cppapi/2019.1.0f2/il2cpp-class.h | 1368 + .../2019.2.0f1/il2cpp-api-functions.h | 283 + .../cpp/il2cppapi/2019.2.0f1/il2cpp-class.h | 1356 + .../2019.3.0f6/il2cpp-api-functions.h | 304 + .../cpp/il2cppapi/2019.3.0f6/il2cpp-class.h | 1405 + 
.../2019.3.7f1/il2cpp-api-functions.h | 304 + .../cpp/il2cppapi/2019.3.7f1/il2cpp-class.h | 1412 + .../il2cppapi/5.3.2f1/il2cpp-api-functions.h | 250 + .../main/cpp/il2cppapi/5.3.2f1/il2cpp-class.h | 877 + .../il2cppapi/5.3.3f1/il2cpp-api-functions.h | 250 + .../main/cpp/il2cppapi/5.3.3f1/il2cpp-class.h | 795 + .../il2cppapi/5.3.5f1/il2cpp-api-functions.h | 253 + .../main/cpp/il2cppapi/5.3.5f1/il2cpp-class.h | 924 + .../il2cppapi/5.3.6f1/il2cpp-api-functions.h | 257 + .../main/cpp/il2cppapi/5.3.6f1/il2cpp-class.h | 985 + .../il2cppapi/5.3.7f1/il2cpp-api-functions.h | 257 + .../main/cpp/il2cppapi/5.3.7f1/il2cpp-class.h | 985 + .../il2cppapi/5.4.0f3/il2cpp-api-functions.h | 257 + .../main/cpp/il2cppapi/5.4.0f3/il2cpp-class.h | 985 + .../il2cppapi/5.4.1f1/il2cpp-api-functions.h | 257 + .../main/cpp/il2cppapi/5.4.1f1/il2cpp-class.h | 985 + .../il2cppapi/5.4.4f1/il2cpp-api-functions.h | 257 + .../main/cpp/il2cppapi/5.4.4f1/il2cpp-class.h | 986 + .../il2cppapi/5.5.0f3/il2cpp-api-functions.h | 258 + .../main/cpp/il2cppapi/5.5.0f3/il2cpp-class.h | 1002 + .../il2cppapi/5.5.1f1/il2cpp-api-functions.h | 258 + .../main/cpp/il2cppapi/5.5.1f1/il2cpp-class.h | 1003 + .../il2cppapi/5.6.0f3/il2cpp-api-functions.h | 262 + .../main/cpp/il2cppapi/5.6.0f3/il2cpp-class.h | 1013 + module/src/main/cpp/log.h | 16 + module/src/main/cpp/main.cpp | 79 + module/src/main/cpp/whale/CMakeLists.txt | 201 + module/src/main/cpp/whale/include/whale.h | 23 + .../cpp/whale/src/android/android_build.h | 30 + .../whale/src/android/art/art_hook_param.h | 31 + .../src/android/art/art_jni_trampoline.cc | 261 + .../src/android/art/art_jni_trampoline.h | 16 + .../cpp/whale/src/android/art/art_method.cc | 79 + .../cpp/whale/src/android/art/art_method.h | 145 + .../cpp/whale/src/android/art/art_runtime.cc | 495 + .../cpp/whale/src/android/art/art_runtime.h | 153 + .../src/android/art/art_symbol_resolver.cc | 118 + .../src/android/art/art_symbol_resolver.h | 54 + .../cpp/whale/src/android/art/java_types.cc | 99 
+ .../cpp/whale/src/android/art/java_types.h | 61 + .../cpp/whale/src/android/art/modifiers.h | 58 + .../whale/src/android/art/native_on_load.cc | 96 + .../whale/src/android/art/native_on_load.h | 25 + .../android/art/scoped_thread_state_change.cc | 76 + .../android/art/scoped_thread_state_change.h | 37 + .../src/android/art/well_known_classes.cc | 101 + .../src/android/art/well_known_classes.h | 31 + .../main/cpp/whale/src/android/jni_helper.h | 73 + .../cpp/whale/src/android/native_bridge.h | 131 + .../main/cpp/whale/src/assembler/assembler.cc | 64 + .../main/cpp/whale/src/assembler/assembler.h | 284 + .../src/main/cpp/whale/src/assembler/label.h | 108 + .../whale/src/assembler/managed_register.h | 44 + .../cpp/whale/src/assembler/memory_region.cc | 14 + .../cpp/whale/src/assembler/memory_region.h | 145 + .../cpp/whale/src/assembler/value_object.h | 15 + .../whale/src/assembler/vixl/CMakeLists.txt | 52 + .../vixl/aarch32/assembler-aarch32.cc | 27923 +++++++ .../vixl/aarch32/assembler-aarch32.h | 6159 ++ .../vixl/aarch32/constants-aarch32.cc | 855 + .../vixl/aarch32/constants-aarch32.h | 541 + .../assembler/vixl/aarch32/disasm-aarch32.cc | 67276 ++++++++++++++++ .../assembler/vixl/aarch32/disasm-aarch32.h | 2723 + .../vixl/aarch32/instructions-aarch32.cc | 742 + .../vixl/aarch32/instructions-aarch32.h | 1359 + .../vixl/aarch32/location-aarch32.cc | 152 + .../assembler/vixl/aarch32/location-aarch32.h | 411 + .../vixl/aarch32/macro-assembler-aarch32.cc | 2312 + .../vixl/aarch32/macro-assembler-aarch32.h | 11185 +++ .../vixl/aarch32/operands-aarch32.cc | 563 + .../assembler/vixl/aarch32/operands-aarch32.h | 927 + .../src/assembler/vixl/aarch64/abi-aarch64.h | 167 + .../vixl/aarch64/assembler-aarch64.cc | 6295 ++ .../vixl/aarch64/assembler-aarch64.h | 4548 ++ .../vixl/aarch64/constants-aarch64.h | 2661 + .../src/assembler/vixl/aarch64/cpu-aarch64.cc | 178 + .../src/assembler/vixl/aarch64/cpu-aarch64.h | 86 + .../aarch64/cpu-features-auditor-aarch64.cc | 1165 + 
.../aarch64/cpu-features-auditor-aarch64.h | 125 + .../assembler/vixl/aarch64/decoder-aarch64.cc | 1067 + .../assembler/vixl/aarch64/decoder-aarch64.h | 294 + .../assembler/vixl/aarch64/disasm-aarch64.cc | 6035 ++ .../assembler/vixl/aarch64/disasm-aarch64.h | 217 + .../vixl/aarch64/instructions-aarch64.cc | 713 + .../vixl/aarch64/instructions-aarch64.h | 896 + .../vixl/aarch64/instrument-aarch64.cc | 967 + .../vixl/aarch64/instrument-aarch64.h | 117 + .../assembler/vixl/aarch64/logic-aarch64.cc | 5484 ++ .../vixl/aarch64/macro-assembler-aarch64.cc | 3059 + .../vixl/aarch64/macro-assembler-aarch64.h | 4050 + .../vixl/aarch64/operands-aarch64.cc | 528 + .../assembler/vixl/aarch64/operands-aarch64.h | 993 + .../vixl/aarch64/pointer-auth-aarch64.cc | 197 + .../vixl/aarch64/simulator-aarch64.cc | 6940 ++ .../vixl/aarch64/simulator-aarch64.h | 3371 + .../aarch64/simulator-constants-aarch64.h | 192 + .../src/assembler/vixl/assembler-base-vixl.h | 101 + .../src/assembler/vixl/code-buffer-vixl.cc | 185 + .../src/assembler/vixl/code-buffer-vixl.h | 191 + .../vixl/code-generation-scopes-vixl.h | 322 + .../vixl/compiler-intrinsics-vixl.cc | 144 + .../assembler/vixl/compiler-intrinsics-vixl.h | 160 + .../whale/src/assembler/vixl/cpu-features.cc | 211 + .../whale/src/assembler/vixl/cpu-features.h | 379 + .../whale/src/assembler/vixl/globals-vixl.h | 283 + .../whale/src/assembler/vixl/invalset-vixl.h | 915 + .../vixl/macro-assembler-interface.h | 75 + .../whale/src/assembler/vixl/platform-vixl.h | 39 + .../src/assembler/vixl/pool-manager-impl.h | 522 + .../whale/src/assembler/vixl/pool-manager.h | 555 + .../whale/src/assembler/vixl/utils-vixl.cc | 555 + .../cpp/whale/src/assembler/vixl/utils-vixl.h | 1281 + .../whale/src/assembler/x86/assembler_x86.cc | 3092 + .../whale/src/assembler/x86/assembler_x86.h | 1100 + .../whale/src/assembler/x86/constants_x86.h | 94 + .../src/assembler/x86/managed_register_x86.cc | 69 + .../src/assembler/x86/managed_register_x86.h | 179 + 
.../whale/src/assembler/x86/registers_x86.h | 39 + .../src/assembler/x86_64/assembler_x86_64.cc | 3881 + .../src/assembler/x86_64/assembler_x86_64.h | 1276 + .../src/assembler/x86_64/constants_x86_64.h | 126 + .../x86_64/managed_register_x86_64.cc | 66 + .../x86_64/managed_register_x86_64.h | 172 + .../src/assembler/x86_64/registers_x86_64.h | 52 + module/src/main/cpp/whale/src/base/align.h | 144 + .../src/main/cpp/whale/src/base/array_ref.h | 195 + .../src/main/cpp/whale/src/base/bit_utils.h | 479 + .../src/main/cpp/whale/src/base/cxx_helper.h | 53 + module/src/main/cpp/whale/src/base/enums.h | 32 + module/src/main/cpp/whale/src/base/logging.h | 222 + module/src/main/cpp/whale/src/base/macros.h | 51 + module/src/main/cpp/whale/src/base/offsets.h | 60 + .../main/cpp/whale/src/base/primitive_types.h | 22 + .../src/main/cpp/whale/src/base/singleton.h | 32 + .../main/cpp/whale/src/base/stringprintf.h | 22 + .../main/cpp/whale/src/dbi/arm/decoder_arm.cc | 21 + .../main/cpp/whale/src/dbi/arm/decoder_arm.h | 32 + .../cpp/whale/src/dbi/arm/decoder_thumb.cc | 41 + .../cpp/whale/src/dbi/arm/decoder_thumb.h | 51 + .../cpp/whale/src/dbi/arm/inline_hook_arm.cc | 112 + .../cpp/whale/src/dbi/arm/inline_hook_arm.h | 38 + .../src/dbi/arm/instruction_rewriter_arm.cc | 406 + .../src/dbi/arm/instruction_rewriter_arm.h | 133 + .../cpp/whale/src/dbi/arm/registers_arm.h | 73 + .../cpp/whale/src/dbi/arm64/decoder_arm64.cc | 23 + .../cpp/whale/src/dbi/arm64/decoder_arm64.h | 25 + .../whale/src/dbi/arm64/inline_hook_arm64.cc | 77 + .../whale/src/dbi/arm64/inline_hook_arm64.h | 35 + .../dbi/arm64/instruction_rewriter_arm64.cc | 256 + .../dbi/arm64/instruction_rewriter_arm64.h | 66 + .../cpp/whale/src/dbi/arm64/registers_arm64.h | 230 + .../src/main/cpp/whale/src/dbi/backup_code.h | 56 + .../whale/src/dbi/darwin/macho_import_hook.cc | 128 + .../whale/src/dbi/darwin/macho_import_hook.h | 50 + .../src/main/cpp/whale/src/dbi/hook_common.cc | 53 + .../src/main/cpp/whale/src/dbi/hook_common.h | 
112 + .../cpp/whale/src/dbi/instruction_rewriter.h | 30 + .../main/cpp/whale/src/dbi/instruction_set.cc | 77 + .../main/cpp/whale/src/dbi/instruction_set.h | 110 + .../cpp/whale/src/dbi/x86/distorm/config.h | 180 + .../cpp/whale/src/dbi/x86/distorm/decoder.c | 650 + .../cpp/whale/src/dbi/x86/distorm/decoder.h | 33 + .../cpp/whale/src/dbi/x86/distorm/distorm.c | 456 + .../cpp/whale/src/dbi/x86/distorm/distorm.h | 484 + .../whale/src/dbi/x86/distorm/instructions.c | 597 + .../whale/src/dbi/x86/distorm/instructions.h | 463 + .../cpp/whale/src/dbi/x86/distorm/insts.c | 7940 ++ .../cpp/whale/src/dbi/x86/distorm/insts.h | 64 + .../cpp/whale/src/dbi/x86/distorm/mnemonics.c | 312 + .../cpp/whale/src/dbi/x86/distorm/mnemonics.h | 301 + .../cpp/whale/src/dbi/x86/distorm/operands.c | 1290 + .../cpp/whale/src/dbi/x86/distorm/operands.h | 28 + .../cpp/whale/src/dbi/x86/distorm/prefix.c | 368 + .../cpp/whale/src/dbi/x86/distorm/prefix.h | 64 + .../cpp/whale/src/dbi/x86/distorm/textdefs.c | 172 + .../cpp/whale/src/dbi/x86/distorm/textdefs.h | 57 + .../cpp/whale/src/dbi/x86/distorm/wstring.c | 47 + .../cpp/whale/src/dbi/x86/distorm/wstring.h | 35 + .../cpp/whale/src/dbi/x86/distorm/x86defs.h | 82 + .../cpp/whale/src/dbi/x86/inline_hook_x86.cc | 71 + .../cpp/whale/src/dbi/x86/inline_hook_x86.h | 35 + .../src/dbi/x86/instruction_rewriter_x86.cc | 119 + .../src/dbi/x86/instruction_rewriter_x86.h | 66 + .../src/dbi/x86/intercept_syscall_x86.cc | 28 + .../whale/src/dbi/x86/intercept_syscall_x86.h | 20 + .../src/dbi/x86_64/inline_hook_x86_64.cc | 73 + .../whale/src/dbi/x86_64/inline_hook_x86_64.h | 35 + .../dbi/x86_64/instruction_rewriter_x86_64.cc | 111 + .../dbi/x86_64/instruction_rewriter_x86_64.h | 66 + module/src/main/cpp/whale/src/interceptor.cc | 32 + module/src/main/cpp/whale/src/interceptor.h | 32 + .../cpp/whale/src/libffi/aarch64/ffi_arm64.c | 946 + .../cpp/whale/src/libffi/aarch64/internal.h | 72 + .../cpp/whale/src/libffi/aarch64/sysv_arm64.S | 441 + 
.../main/cpp/whale/src/libffi/arm/ffi_armv7.c | 824 + .../main/cpp/whale/src/libffi/arm/internal.h | 12 + .../cpp/whale/src/libffi/arm/sysv_armv7.S | 388 + .../src/main/cpp/whale/src/libffi/closures.c | 966 + module/src/main/cpp/whale/src/libffi/debug.c | 64 + .../src/main/cpp/whale/src/libffi/dlmalloc.c | 5169 ++ module/src/main/cpp/whale/src/libffi/ffi.h | 24 + .../src/main/cpp/whale/src/libffi/ffi_cfi.h | 55 + .../main/cpp/whale/src/libffi/ffi_common.h | 149 + .../src/main/cpp/whale/src/libffi/ffi_cxx.cc | 68 + .../src/main/cpp/whale/src/libffi/ffi_cxx.h | 117 + .../src/main/cpp/whale/src/libffi/fficonfig.h | 21 + .../src/main/cpp/whale/src/libffi/ffitarget.h | 24 + .../main/cpp/whale/src/libffi/java_raw_api.c | 374 + .../src/libffi/platform_include/ffi_arm64.h | 516 + .../src/libffi/platform_include/ffi_armv7.h | 516 + .../src/libffi/platform_include/ffi_i386.h | 516 + .../src/libffi/platform_include/ffi_x86_64.h | 516 + .../libffi/platform_include/fficonfig_arm64.h | 224 + .../libffi/platform_include/fficonfig_armv7.h | 224 + .../libffi/platform_include/fficonfig_i386.h | 220 + .../platform_include/fficonfig_x86_64.h | 220 + .../libffi/platform_include/ffitarget_arm64.h | 86 + .../libffi/platform_include/ffitarget_armv7.h | 87 + .../libffi/platform_include/ffitarget_i386.h | 152 + .../platform_include/ffitarget_x86_64.h | 152 + .../src/main/cpp/whale/src/libffi/prep_cif.c | 261 + .../src/main/cpp/whale/src/libffi/raw_api.c | 267 + module/src/main/cpp/whale/src/libffi/types.c | 108 + .../main/cpp/whale/src/libffi/x86/asmnames.h | 35 + .../cpp/whale/src/libffi/x86/ffi64_x86_64.c | 889 + .../main/cpp/whale/src/libffi/x86/ffi_i386.c | 759 + .../cpp/whale/src/libffi/x86/ffiw64_x86_64.c | 313 + .../main/cpp/whale/src/libffi/x86/internal.h | 34 + .../cpp/whale/src/libffi/x86/internal64.h | 27 + .../main/cpp/whale/src/libffi/x86/sysv_i386.S | 1134 + .../cpp/whale/src/libffi/x86/unix64_x86_64.S | 571 + .../cpp/whale/src/libffi/x86/win64_x86_64.S | 237 + 
.../cpp/whale/src/platform/linux/elf_image.cc | 203 + .../cpp/whale/src/platform/linux/elf_image.h | 157 + .../whale/src/platform/linux/process_map.cc | 82 + .../whale/src/platform/linux/process_map.h | 44 + .../src/main/cpp/whale/src/platform/memory.cc | 80 + .../src/main/cpp/whale/src/platform/memory.h | 22 + .../cpp/whale/src/simulator/code_simulator.cc | 20 + .../cpp/whale/src/simulator/code_simulator.h | 35 + .../src/simulator/code_simulator_arm64.cc | 54 + .../src/simulator/code_simulator_arm64.h | 43 + .../src/simulator/code_simulator_container.cc | 17 + .../src/simulator/code_simulator_container.h | 43 + module/src/main/cpp/whale/src/whale.cc | 140 + settings.gradle | 1 + template/magisk_module/.gitattributes | 10 + .../META-INF/com/google/android/update-binary | 173 + .../com/google/android/updater-script | 1 + template/magisk_module/README.md | 1 + template/magisk_module/customize.sh | 65 + template/magisk_module/post-fs-data.sh | 13 + template/magisk_module/riru.sh | 32 + template/magisk_module/uninstall.sh | 6 + template/magisk_module/verify.sh | 39 + 299 files changed, 263158 insertions(+), 1 deletion(-) create mode 100644 .gitattributes create mode 100644 .gitignore create mode 100644 build.gradle create mode 100644 gradle.properties create mode 100644 gradle/wrapper/gradle-wrapper.jar create mode 100644 gradle/wrapper/gradle-wrapper.properties create mode 100644 gradlew create mode 100644 gradlew.bat create mode 100644 module.gradle create mode 100644 module/.gitignore create mode 100644 module/build.gradle create mode 100644 module/src/main/AndroidManifest.xml create mode 100644 module/src/main/cpp/CMakeLists.txt create mode 100644 module/src/main/cpp/game.h create mode 100644 module/src/main/cpp/hook.cpp create mode 100644 module/src/main/cpp/hook.h create mode 100644 module/src/main/cpp/il2cpp-tabledefs.h create mode 100644 module/src/main/cpp/il2cpp.cpp create mode 100644 module/src/main/cpp/il2cpp.h create mode 100644 
module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-api-functions.h create mode 100644 
module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-class.h create mode 100644 module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-api-functions.h create mode 100644 module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-class.h create mode 100644 module/src/main/cpp/log.h create mode 100644 module/src/main/cpp/main.cpp create mode 100644 module/src/main/cpp/whale/CMakeLists.txt create mode 100644 module/src/main/cpp/whale/include/whale.h create mode 100644 
module/src/main/cpp/whale/src/android/android_build.h create mode 100644 module/src/main/cpp/whale/src/android/art/art_hook_param.h create mode 100644 module/src/main/cpp/whale/src/android/art/art_jni_trampoline.cc create mode 100644 module/src/main/cpp/whale/src/android/art/art_jni_trampoline.h create mode 100644 module/src/main/cpp/whale/src/android/art/art_method.cc create mode 100644 module/src/main/cpp/whale/src/android/art/art_method.h create mode 100644 module/src/main/cpp/whale/src/android/art/art_runtime.cc create mode 100644 module/src/main/cpp/whale/src/android/art/art_runtime.h create mode 100644 module/src/main/cpp/whale/src/android/art/art_symbol_resolver.cc create mode 100644 module/src/main/cpp/whale/src/android/art/art_symbol_resolver.h create mode 100644 module/src/main/cpp/whale/src/android/art/java_types.cc create mode 100644 module/src/main/cpp/whale/src/android/art/java_types.h create mode 100644 module/src/main/cpp/whale/src/android/art/modifiers.h create mode 100644 module/src/main/cpp/whale/src/android/art/native_on_load.cc create mode 100644 module/src/main/cpp/whale/src/android/art/native_on_load.h create mode 100644 module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.cc create mode 100644 module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.h create mode 100644 module/src/main/cpp/whale/src/android/art/well_known_classes.cc create mode 100644 module/src/main/cpp/whale/src/android/art/well_known_classes.h create mode 100644 module/src/main/cpp/whale/src/android/jni_helper.h create mode 100644 module/src/main/cpp/whale/src/android/native_bridge.h create mode 100644 module/src/main/cpp/whale/src/assembler/assembler.cc create mode 100644 module/src/main/cpp/whale/src/assembler/assembler.h create mode 100644 module/src/main/cpp/whale/src/assembler/label.h create mode 100644 module/src/main/cpp/whale/src/assembler/managed_register.h create mode 100644 module/src/main/cpp/whale/src/assembler/memory_region.cc 
create mode 100644 module/src/main/cpp/whale/src/assembler/memory_region.h create mode 100644 module/src/main/cpp/whale/src/assembler/value_object.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/CMakeLists.txt create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/abi-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/constants-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.cc create mode 100644 
module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/logic-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/pointer-auth-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-constants-aarch64.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/assembler-base-vixl.h create mode 100644 
module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/code-generation-scopes-vixl.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/cpu-features.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/cpu-features.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/globals-vixl.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/invalset-vixl.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/macro-assembler-interface.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/platform-vixl.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/pool-manager-impl.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/pool-manager.h create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.cc create mode 100644 module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.h create mode 100644 module/src/main/cpp/whale/src/assembler/x86/assembler_x86.cc create mode 100644 module/src/main/cpp/whale/src/assembler/x86/assembler_x86.h create mode 100644 module/src/main/cpp/whale/src/assembler/x86/constants_x86.h create mode 100644 module/src/main/cpp/whale/src/assembler/x86/managed_register_x86.cc create mode 100644 module/src/main/cpp/whale/src/assembler/x86/managed_register_x86.h create mode 100644 module/src/main/cpp/whale/src/assembler/x86/registers_x86.h create mode 100644 module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.h create mode 100644 module/src/main/cpp/whale/src/assembler/x86_64/constants_x86_64.h create mode 100644 
module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.cc create mode 100644 module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.h create mode 100644 module/src/main/cpp/whale/src/assembler/x86_64/registers_x86_64.h create mode 100644 module/src/main/cpp/whale/src/base/align.h create mode 100644 module/src/main/cpp/whale/src/base/array_ref.h create mode 100644 module/src/main/cpp/whale/src/base/bit_utils.h create mode 100644 module/src/main/cpp/whale/src/base/cxx_helper.h create mode 100644 module/src/main/cpp/whale/src/base/enums.h create mode 100644 module/src/main/cpp/whale/src/base/logging.h create mode 100644 module/src/main/cpp/whale/src/base/macros.h create mode 100644 module/src/main/cpp/whale/src/base/offsets.h create mode 100644 module/src/main/cpp/whale/src/base/primitive_types.h create mode 100644 module/src/main/cpp/whale/src/base/singleton.h create mode 100644 module/src/main/cpp/whale/src/base/stringprintf.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm/decoder_arm.cc create mode 100644 module/src/main/cpp/whale/src/dbi/arm/decoder_arm.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.cc create mode 100644 module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.cc create mode 100644 module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.cc create mode 100644 module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm/registers_arm.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.cc create mode 100644 module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.cc create mode 100644 module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.h create mode 100644 
module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.cc create mode 100644 module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.h create mode 100644 module/src/main/cpp/whale/src/dbi/arm64/registers_arm64.h create mode 100644 module/src/main/cpp/whale/src/dbi/backup_code.h create mode 100644 module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.cc create mode 100644 module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.h create mode 100644 module/src/main/cpp/whale/src/dbi/hook_common.cc create mode 100644 module/src/main/cpp/whale/src/dbi/hook_common.h create mode 100644 module/src/main/cpp/whale/src/dbi/instruction_rewriter.h create mode 100644 module/src/main/cpp/whale/src/dbi/instruction_set.cc create mode 100644 module/src/main/cpp/whale/src/dbi/instruction_set.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/config.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/insts.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/insts.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/operands.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/operands.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.h create mode 100644 
module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.c create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/distorm/x86defs.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.cc create mode 100644 module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.cc create mode 100644 module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.cc create mode 100644 module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.cc create mode 100644 module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.h create mode 100644 module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.cc create mode 100644 module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.h create mode 100644 module/src/main/cpp/whale/src/interceptor.cc create mode 100644 module/src/main/cpp/whale/src/interceptor.h create mode 100644 module/src/main/cpp/whale/src/libffi/aarch64/ffi_arm64.c create mode 100644 module/src/main/cpp/whale/src/libffi/aarch64/internal.h create mode 100644 module/src/main/cpp/whale/src/libffi/aarch64/sysv_arm64.S create mode 100644 module/src/main/cpp/whale/src/libffi/arm/ffi_armv7.c create mode 100644 module/src/main/cpp/whale/src/libffi/arm/internal.h create mode 100644 module/src/main/cpp/whale/src/libffi/arm/sysv_armv7.S create mode 100644 module/src/main/cpp/whale/src/libffi/closures.c create mode 100644 module/src/main/cpp/whale/src/libffi/debug.c create mode 100644 module/src/main/cpp/whale/src/libffi/dlmalloc.c create mode 100644 
module/src/main/cpp/whale/src/libffi/ffi.h create mode 100644 module/src/main/cpp/whale/src/libffi/ffi_cfi.h create mode 100644 module/src/main/cpp/whale/src/libffi/ffi_common.h create mode 100644 module/src/main/cpp/whale/src/libffi/ffi_cxx.cc create mode 100644 module/src/main/cpp/whale/src/libffi/ffi_cxx.h create mode 100644 module/src/main/cpp/whale/src/libffi/fficonfig.h create mode 100644 module/src/main/cpp/whale/src/libffi/ffitarget.h create mode 100644 module/src/main/cpp/whale/src/libffi/java_raw_api.c create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffi_arm64.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffi_armv7.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffi_i386.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffi_x86_64.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_arm64.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_armv7.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_i386.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_x86_64.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_arm64.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_armv7.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_i386.h create mode 100644 module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_x86_64.h create mode 100644 module/src/main/cpp/whale/src/libffi/prep_cif.c create mode 100644 module/src/main/cpp/whale/src/libffi/raw_api.c create mode 100644 module/src/main/cpp/whale/src/libffi/types.c create mode 100644 module/src/main/cpp/whale/src/libffi/x86/asmnames.h create mode 100644 module/src/main/cpp/whale/src/libffi/x86/ffi64_x86_64.c create mode 100644 module/src/main/cpp/whale/src/libffi/x86/ffi_i386.c 
create mode 100644 module/src/main/cpp/whale/src/libffi/x86/ffiw64_x86_64.c create mode 100644 module/src/main/cpp/whale/src/libffi/x86/internal.h create mode 100644 module/src/main/cpp/whale/src/libffi/x86/internal64.h create mode 100644 module/src/main/cpp/whale/src/libffi/x86/sysv_i386.S create mode 100644 module/src/main/cpp/whale/src/libffi/x86/unix64_x86_64.S create mode 100644 module/src/main/cpp/whale/src/libffi/x86/win64_x86_64.S create mode 100644 module/src/main/cpp/whale/src/platform/linux/elf_image.cc create mode 100644 module/src/main/cpp/whale/src/platform/linux/elf_image.h create mode 100644 module/src/main/cpp/whale/src/platform/linux/process_map.cc create mode 100644 module/src/main/cpp/whale/src/platform/linux/process_map.h create mode 100644 module/src/main/cpp/whale/src/platform/memory.cc create mode 100644 module/src/main/cpp/whale/src/platform/memory.h create mode 100644 module/src/main/cpp/whale/src/simulator/code_simulator.cc create mode 100644 module/src/main/cpp/whale/src/simulator/code_simulator.h create mode 100644 module/src/main/cpp/whale/src/simulator/code_simulator_arm64.cc create mode 100644 module/src/main/cpp/whale/src/simulator/code_simulator_arm64.h create mode 100644 module/src/main/cpp/whale/src/simulator/code_simulator_container.cc create mode 100644 module/src/main/cpp/whale/src/simulator/code_simulator_container.h create mode 100644 module/src/main/cpp/whale/src/whale.cc create mode 100644 settings.gradle create mode 100644 template/magisk_module/.gitattributes create mode 100644 template/magisk_module/META-INF/com/google/android/update-binary create mode 100644 template/magisk_module/META-INF/com/google/android/updater-script create mode 100644 template/magisk_module/README.md create mode 100644 template/magisk_module/customize.sh create mode 100644 template/magisk_module/post-fs-data.sh create mode 100644 template/magisk_module/riru.sh create mode 100644 template/magisk_module/uninstall.sh create mode 100644 
template/magisk_module/verify.sh diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..1341b049 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +*.prop text eol=lf +*.sh text eol=lf \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..fc008402 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +*.iml +.gradle +/local.properties +.idea +/.idea/caches/build_file_checksums.ser +/.idea/libraries +/.idea/modules.xml +/.idea/workspace.xml +.DS_Store +/build +/captures +/out +.externalNativeBuild +.cxx \ No newline at end of file diff --git a/README.md b/README.md index 8e88f24b..03391b84 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,17 @@ # Riru-Il2CppDumper -Il2CppDumper Riru模块 +Riru版Il2CppDumper,在游戏运行时dump数据,用于绕过保护,加密以及混淆。 + +## 如何食用 +1. 安装[Magisk](https://github.com/topjohnwu/Magisk)和[Riru](https://github.com/RikkaApps/Riru) +2. 下载源码 +3. 编辑`game.h`, 修改游戏包名`GamePackageName`和Unity版本`UnityVersion`,`UnityVersion`的值请看文件内注释 +4. 编译 +5. 在Magisk里安装模块 +6. 
启动游戏,会在`/data/data/GamePackageName/files/`目录下生成`dump.cs` + +## TODO +- [ ] 强化搜索 +- [ ] 完善dump.cs输出 +- [ ] 2018.3.0f2(24.1)及以上版本使用`il2cpp_image_get_class` +- [ ] 泛型相关输出 +- [ ] 生成IDA脚本,头文件 diff --git a/build.gradle b/build.gradle new file mode 100644 index 00000000..7cea6813 --- /dev/null +++ b/build.gradle @@ -0,0 +1,33 @@ +apply plugin: 'idea' + +idea.module { + excludeDirs += file('out') + resourceDirs += file('template') + resourceDirs += file('scripts') +} + +buildscript { + repositories { + google() + jcenter() + } + dependencies { + classpath 'com.android.tools.build:gradle:3.5.3' + } +} + +allprojects { + repositories { + google() + jcenter() + } +} + +task clean(type: Delete) { + delete rootProject.buildDir +} + +ext { + minSdkVersion = 23 + targetSdkVersion = 29 +} diff --git a/gradle.properties b/gradle.properties new file mode 100644 index 00000000..c73d2393 --- /dev/null +++ b/gradle.properties @@ -0,0 +1,19 @@ +# Project-wide Gradle settings. +# IDE (e.g. Android Studio) users: +# Gradle settings configured through the IDE *will override* +# any settings specified in this file. +# For more details on how to configure your build environment visit +# http://www.gradle.org/docs/current/userguide/build_environment.html +# Specifies the JVM arguments used for the daemon process. +# The setting is particularly useful for tweaking memory settings. +org.gradle.jvmargs=-Xmx1536m +# When configured, Gradle will run in incubating parallel mode. +# This option should only be used with decoupled projects. 
More details, visit +# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects +# org.gradle.parallel=true +# AndroidX package structure to make it clearer which packages are bundled with the +# Android operating system, and which are packaged with your app's APK +# https://developer.android.com/topic/libraries/support-library/androidx-rn +android.useAndroidX=true +# Automatically convert third-party libraries to use AndroidX +android.enableJetifier=true diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..f6b961fd5a86aa5fbfe90f707c3138408be7c718 GIT binary patch literal 54329 zcmagFV|ZrKvM!pAZQHhO+qP}9lTNj?q^^Y^VFp)SH8qbSJ)2BQ2giqr}t zFG7D6)c?v~^Z#E_K}1nTQbJ9gQ9<%vVRAxVj)8FwL5_iTdUB>&m3fhE=kRWl;g`&m z!W5kh{WsV%fO*%je&j+Lv4xxK~zsEYQls$Q-p&dwID|A)!7uWtJF-=Tm1{V@#x*+kUI$=%KUuf2ka zjiZ{oiL1MXE2EjciJM!jrjFNwCh`~hL>iemrqwqnX?T*MX;U>>8yRcZb{Oy+VKZos zLiFKYPw=LcaaQt8tj=eoo3-@bG_342HQ%?jpgAE?KCLEHC+DmjxAfJ%Og^$dpC8Xw zAcp-)tfJm}BPNq_+6m4gBgBm3+CvmL>4|$2N$^Bz7W(}fz1?U-u;nE`+9`KCLuqg} zwNstNM!J4Uw|78&Y9~9>MLf56to!@qGkJw5Thx%zkzj%Ek9Nn1QA@8NBXbwyWC>9H z#EPwjMNYPigE>*Ofz)HfTF&%PFj$U6mCe-AFw$U%-L?~-+nSXHHKkdgC5KJRTF}`G zE_HNdrE}S0zf4j{r_f-V2imSqW?}3w-4=f@o@-q+cZgaAbZ((hn))@|eWWhcT2pLpTpL!;_5*vM=sRL8 zqU##{U#lJKuyqW^X$ETU5ETeEVzhU|1m1750#f}38_5N9)B_2|v@1hUu=Kt7-@dhA zq_`OMgW01n`%1dB*}C)qxC8q;?zPeF_r;>}%JYmlER_1CUbKa07+=TV45~symC*g8 zW-8(gag#cAOuM0B1xG8eTp5HGVLE}+gYTmK=`XVVV*U!>H`~j4+ROIQ+NkN$LY>h4 zqpwdeE_@AX@PL};e5vTn`Ro(EjHVf$;^oiA%@IBQq>R7_D>m2D4OwwEepkg}R_k*M zM-o;+P27087eb+%*+6vWFCo9UEGw>t&WI17Pe7QVuoAoGHdJ(TEQNlJOqnjZ8adCb zI`}op16D@v7UOEo%8E-~m?c8FL1utPYlg@m$q@q7%mQ4?OK1h%ODjTjFvqd!C z-PI?8qX8{a@6d&Lb_X+hKxCImb*3GFemm?W_du5_&EqRq!+H?5#xiX#w$eLti-?E$;Dhu`{R(o>LzM4CjO>ICf z&DMfES#FW7npnbcuqREgjPQM#gs6h>`av_oEWwOJZ2i2|D|0~pYd#WazE2Bbsa}X@ zu;(9fi~%!VcjK6)?_wMAW-YXJAR{QHxrD5g(ou9mR6LPSA4BRG1QSZT6A?kelP_g- 
zH(JQjLc!`H4N=oLw=f3{+WmPA*s8QEeEUf6Vg}@!xwnsnR0bl~^2GSa5vb!Yl&4!> zWb|KQUsC$lT=3A|7vM9+d;mq=@L%uWKwXiO9}a~gP4s_4Yohc!fKEgV7WbVo>2ITbE*i`a|V!^p@~^<={#?Gz57 zyPWeM2@p>D*FW#W5Q`1`#5NW62XduP1XNO(bhg&cX`-LYZa|m-**bu|>}S;3)eP8_ zpNTnTfm8 ze+7wDH3KJ95p)5tlwk`S7mbD`SqHnYD*6`;gpp8VdHDz%RR_~I_Ar>5)vE-Pgu7^Y z|9Px+>pi3!DV%E%4N;ii0U3VBd2ZJNUY1YC^-e+{DYq+l@cGtmu(H#Oh%ibUBOd?C z{y5jW3v=0eV0r@qMLgv1JjZC|cZ9l9Q)k1lLgm))UR@#FrJd>w^`+iy$c9F@ic-|q zVHe@S2UAnc5VY_U4253QJxm&Ip!XKP8WNcnx9^cQ;KH6PlW8%pSihSH2(@{2m_o+m zr((MvBja2ctg0d0&U5XTD;5?d?h%JcRJp{_1BQW1xu&BrA3(a4Fh9hon-ly$pyeHq zG&;6q?m%NJ36K1Sq_=fdP(4f{Hop;_G_(i?sPzvB zDM}>*(uOsY0I1j^{$yn3#U(;B*g4cy$-1DTOkh3P!LQ;lJlP%jY8}Nya=h8$XD~%Y zbV&HJ%eCD9nui-0cw!+n`V~p6VCRqh5fRX z8`GbdZ@73r7~myQLBW%db;+BI?c-a>Y)m-FW~M=1^|<21_Sh9RT3iGbO{o-hpN%d6 z7%++#WekoBOP^d0$$|5npPe>u3PLvX_gjH2x(?{&z{jJ2tAOWTznPxv-pAv<*V7r$ z6&glt>7CAClWz6FEi3bToz-soY^{ScrjwVPV51=>n->c(NJngMj6TyHty`bfkF1hc zkJS%A@cL~QV0-aK4>Id!9dh7>0IV;1J9(myDO+gv76L3NLMUm9XyPauvNu$S<)-|F zZS}(kK_WnB)Cl`U?jsdYfAV4nrgzIF@+%1U8$poW&h^c6>kCx3;||fS1_7JvQT~CV zQ8Js+!p)3oW>Df(-}uqC`Tcd%E7GdJ0p}kYj5j8NKMp(KUs9u7?jQ94C)}0rba($~ zqyBx$(1ae^HEDG`Zc@-rXk1cqc7v0wibOR4qpgRDt#>-*8N3P;uKV0CgJE2SP>#8h z=+;i_CGlv+B^+$5a}SicVaSeaNn29K`C&=}`=#Nj&WJP9Xhz4mVa<+yP6hkrq1vo= z1rX4qg8dc4pmEvq%NAkpMK>mf2g?tg_1k2%v}<3`$6~Wlq@ItJ*PhHPoEh1Yi>v57 z4k0JMO)*=S`tKvR5gb-(VTEo>5Y>DZJZzgR+j6{Y`kd|jCVrg!>2hVjz({kZR z`dLlKhoqT!aI8=S+fVp(5*Dn6RrbpyO~0+?fy;bm$0jmTN|t5i6rxqr4=O}dY+ROd zo9Et|x}!u*xi~>-y>!M^+f&jc;IAsGiM_^}+4|pHRn{LThFFpD{bZ|TA*wcGm}XV^ zr*C6~@^5X-*R%FrHIgo-hJTBcyQ|3QEj+cSqp#>&t`ZzB?cXM6S(lRQw$I2?m5=wd z78ki`R?%;o%VUhXH?Z#(uwAn9$m`npJ=cA+lHGk@T7qq_M6Zoy1Lm9E0UUysN)I_x zW__OAqvku^>`J&CB=ie@yNWsaFmem}#L3T(x?a`oZ+$;3O-icj2(5z72Hnj=9Z0w% z<2#q-R=>hig*(t0^v)eGq2DHC%GymE-_j1WwBVGoU=GORGjtaqr0BNigOCqyt;O(S zKG+DoBsZU~okF<7ahjS}bzwXxbAxFfQAk&O@>LsZMsZ`?N?|CDWM(vOm%B3CBPC3o z%2t@%H$fwur}SSnckUm0-k)mOtht`?nwsDz=2#v=RBPGg39i#%odKq{K^;bTD!6A9 zskz$}t)sU^=a#jLZP@I=bPo?f-L}wpMs{Tc!m7-bi!Ldqj3EA~V;4(dltJmTXqH0r 
z%HAWKGutEc9vOo3P6Q;JdC^YTnby->VZ6&X8f{obffZ??1(cm&L2h7q)*w**+sE6dG*;(H|_Q!WxU{g)CeoT z(KY&bv!Usc|m+Fqfmk;h&RNF|LWuNZ!+DdX*L=s-=_iH=@i` z?Z+Okq^cFO4}_n|G*!)Wl_i%qiMBaH8(WuXtgI7EO=M>=i_+;MDjf3aY~6S9w0K zUuDO7O5Ta6+k40~xh~)D{=L&?Y0?c$s9cw*Ufe18)zzk%#ZY>Tr^|e%8KPb0ht`b( zuP@8#Ox@nQIqz9}AbW0RzE`Cf>39bOWz5N3qzS}ocxI=o$W|(nD~@EhW13Rj5nAp; zu2obEJa=kGC*#3=MkdkWy_%RKcN=?g$7!AZ8vBYKr$ePY(8aIQ&yRPlQ=mudv#q$q z4%WzAx=B{i)UdLFx4os?rZp6poShD7Vc&mSD@RdBJ=_m^&OlkEE1DFU@csgKcBifJ zz4N7+XEJhYzzO=86 z#%eBQZ$Nsf2+X0XPHUNmg#(sNt^NW1Y0|M(${e<0kW6f2q5M!2YE|hSEQ*X-%qo(V zHaFwyGZ0on=I{=fhe<=zo{=Og-_(to3?cvL4m6PymtNsdDINsBh8m>a%!5o3s(en) z=1I z6O+YNertC|OFNqd6P=$gMyvmfa`w~p9*gKDESFqNBy(~Zw3TFDYh}$iudn)9HxPBi zdokK@o~nu?%imcURr5Y~?6oo_JBe}t|pU5qjai|#JDyG=i^V~7+a{dEnO<(y>ahND#_X_fcEBNiZ)uc&%1HVtx8Ts z*H_Btvx^IhkfOB#{szN*n6;y05A>3eARDXslaE>tnLa>+`V&cgho?ED+&vv5KJszf zG4@G;7i;4_bVvZ>!mli3j7~tPgybF5|J6=Lt`u$D%X0l}#iY9nOXH@(%FFJLtzb%p zzHfABnSs;v-9(&nzbZytLiqqDIWzn>JQDk#JULcE5CyPq_m#4QV!}3421haQ+LcfO*>r;rg6K|r#5Sh|y@h1ao%Cl)t*u`4 zMTP!deC?aL7uTxm5^nUv#q2vS-5QbBKP|drbDXS%erB>fYM84Kpk^au99-BQBZR z7CDynflrIAi&ahza+kUryju5LR_}-Z27g)jqOc(!Lx9y)e z{cYc&_r947s9pteaa4}dc|!$$N9+M38sUr7h(%@Ehq`4HJtTpA>B8CLNO__@%(F5d z`SmX5jbux6i#qc}xOhumzbAELh*Mfr2SW99=WNOZRZgoCU4A2|4i|ZVFQt6qEhH#B zK_9G;&h*LO6tB`5dXRSBF0hq0tk{2q__aCKXYkP#9n^)@cq}`&Lo)1KM{W+>5mSed zKp~=}$p7>~nK@va`vN{mYzWN1(tE=u2BZhga5(VtPKk(*TvE&zmn5vSbjo zZLVobTl%;t@6;4SsZ>5+U-XEGUZGG;+~|V(pE&qqrp_f~{_1h@5ZrNETqe{bt9ioZ z#Qn~gWCH!t#Ha^n&fT2?{`}D@s4?9kXj;E;lWV9Zw8_4yM0Qg-6YSsKgvQ*fF{#Pq z{=(nyV>#*`RloBVCs;Lp*R1PBIQOY=EK4CQa*BD0MsYcg=opP?8;xYQDSAJBeJpw5 zPBc_Ft9?;<0?pBhCmOtWU*pN*;CkjJ_}qVic`}V@$TwFi15!mF1*m2wVX+>5p%(+R zQ~JUW*zWkalde{90@2v+oVlkxOZFihE&ZJ){c?hX3L2@R7jk*xjYtHi=}qb+4B(XJ z$gYcNudR~4Kz_WRq8eS((>ALWCO)&R-MXE+YxDn9V#X{_H@j616<|P(8h(7z?q*r+ zmpqR#7+g$cT@e&(%_|ipI&A%9+47%30TLY(yuf&*knx1wNx|%*H^;YB%ftt%5>QM= z^i;*6_KTSRzQm%qz*>cK&EISvF^ovbS4|R%)zKhTH_2K>jP3mBGn5{95&G9^a#4|K zv+!>fIsR8z{^x4)FIr*cYT@Q4Z{y}};rLHL+atCgHbfX*;+k&37DIgENn&=k(*lKD 
zG;uL-KAdLn*JQ?@r6Q!0V$xXP=J2i~;_+i3|F;_En;oAMG|I-RX#FwnmU&G}w`7R{ z788CrR-g1DW4h_`&$Z`ctN~{A)Hv_-Bl!%+pfif8wN32rMD zJDs$eVWBYQx1&2sCdB0!vU5~uf)=vy*{}t{2VBpcz<+~h0wb7F3?V^44*&83Z2#F` z32!rd4>uc63rQP$3lTH3zb-47IGR}f)8kZ4JvX#toIpXH`L%NnPDE~$QI1)0)|HS4 zVcITo$$oWWwCN@E-5h>N?Hua!N9CYb6f8vTFd>h3q5Jg-lCI6y%vu{Z_Uf z$MU{{^o~;nD_@m2|E{J)q;|BK7rx%`m``+OqZAqAVj-Dy+pD4-S3xK?($>wn5bi90CFAQ+ACd;&m6DQB8_o zjAq^=eUYc1o{#+p+ zn;K<)Pn*4u742P!;H^E3^Qu%2dM{2slouc$AN_3V^M7H_KY3H)#n7qd5_p~Za7zAj|s9{l)RdbV9e||_67`#Tu*c<8!I=zb@ z(MSvQ9;Wrkq6d)!9afh+G`!f$Ip!F<4ADdc*OY-y7BZMsau%y?EN6*hW4mOF%Q~bw z2==Z3^~?q<1GTeS>xGN-?CHZ7a#M4kDL zQxQr~1ZMzCSKFK5+32C%+C1kE#(2L=15AR!er7GKbp?Xd1qkkGipx5Q~FI-6zt< z*PTpeVI)Ngnnyaz5noIIgNZtb4bQdKG{Bs~&tf)?nM$a;7>r36djllw%hQxeCXeW^ z(i6@TEIuxD<2ulwLTt|&gZP%Ei+l!(%p5Yij6U(H#HMkqM8U$@OKB|5@vUiuY^d6X zW}fP3;Kps6051OEO(|JzmVU6SX(8q>*yf*x5QoxDK={PH^F?!VCzES_Qs>()_y|jg6LJlJWp;L zKM*g5DK7>W_*uv}{0WUB0>MHZ#oJZmO!b3MjEc}VhsLD~;E-qNNd?x7Q6~v zR=0$u>Zc2Xr}>x_5$-s#l!oz6I>W?lw;m9Ae{Tf9eMX;TI-Wf_mZ6sVrMnY#F}cDd z%CV*}fDsXUF7Vbw>PuDaGhu631+3|{xp<@Kl|%WxU+vuLlcrklMC!Aq+7n~I3cmQ! 
z`e3cA!XUEGdEPSu``&lZEKD1IKO(-VGvcnSc153m(i!8ohi`)N2n>U_BemYJ`uY>8B*Epj!oXRLV}XK}>D*^DHQ7?NY*&LJ9VSo`Ogi9J zGa;clWI8vIQqkngv2>xKd91K>?0`Sw;E&TMg&6dcd20|FcTsnUT7Yn{oI5V4@Ow~m zz#k~8TM!A9L7T!|colrC0P2WKZW7PNj_X4MfESbt<-soq*0LzShZ}fyUx!(xIIDwx zRHt^_GAWe0-Vm~bDZ(}XG%E+`XhKpPlMBo*5q_z$BGxYef8O!ToS8aT8pmjbPq)nV z%x*PF5ZuSHRJqJ!`5<4xC*xb2vC?7u1iljB_*iUGl6+yPyjn?F?GOF2_KW&gOkJ?w z3e^qc-te;zez`H$rsUCE0<@7PKGW?7sT1SPYWId|FJ8H`uEdNu4YJjre`8F*D}6Wh z|FQ`xf7yiphHIAkU&OYCn}w^ilY@o4larl?^M7&8YI;hzBIsX|i3UrLsx{QDKwCX< zy;a>yjfJ6!sz`NcVi+a!Fqk^VE^{6G53L?@Tif|j!3QZ0fk9QeUq8CWI;OmO-Hs+F zuZ4sHLA3{}LR2Qlyo+{d@?;`tpp6YB^BMoJt?&MHFY!JQwoa0nTSD+#Ku^4b{5SZVFwU9<~APYbaLO zu~Z)nS#dxI-5lmS-Bnw!(u15by(80LlC@|ynj{TzW)XcspC*}z0~8VRZq>#Z49G`I zgl|C#H&=}n-ajxfo{=pxPV(L*7g}gHET9b*s=cGV7VFa<;Htgjk>KyW@S!|z`lR1( zGSYkEl&@-bZ*d2WQ~hw3NpP=YNHF^XC{TMG$Gn+{b6pZn+5=<()>C!N^jncl0w6BJ zdHdnmSEGK5BlMeZD!v4t5m7ct7{k~$1Ie3GLFoHjAH*b?++s<|=yTF+^I&jT#zuMx z)MLhU+;LFk8bse|_{j+d*a=&cm2}M?*arjBPnfPgLwv)86D$6L zLJ0wPul7IenMvVAK$z^q5<^!)7aI|<&GGEbOr=E;UmGOIa}yO~EIr5xWU_(ol$&fa zR5E(2vB?S3EvJglTXdU#@qfDbCYs#82Yo^aZN6`{Ex#M)easBTe_J8utXu(fY1j|R z9o(sQbj$bKU{IjyhosYahY{63>}$9_+hWxB3j}VQkJ@2$D@vpeRSldU?&7I;qd2MF zSYmJ>zA(@N_iK}m*AMPIJG#Y&1KR)6`LJ83qg~`Do3v^B0>fU&wUx(qefuTgzFED{sJ65!iw{F2}1fQ3= ziFIP{kezQxmlx-!yo+sC4PEtG#K=5VM9YIN0z9~c4XTX?*4e@m;hFM!zVo>A`#566 z>f&3g94lJ{r)QJ5m7Xe3SLau_lOpL;A($wsjHR`;xTXgIiZ#o&vt~ zGR6KdU$FFbLfZCC3AEu$b`tj!9XgOGLSV=QPIYW zjI!hSP#?8pn0@ezuenOzoka8!8~jXTbiJ6+ZuItsWW03uzASFyn*zV2kIgPFR$Yzm zE<$cZlF>R8?Nr2_i?KiripBc+TGgJvG@vRTY2o?(_Di}D30!k&CT`>+7ry2!!iC*X z<@=U0_C#16=PN7bB39w+zPwDOHX}h20Ap);dx}kjXX0-QkRk=cr};GYsjSvyLZa-t zzHONWddi*)RDUH@RTAsGB_#&O+QJaaL+H<<9LLSE+nB@eGF1fALwjVOl8X_sdOYme z0lk!X=S(@25=TZHR7LlPp}fY~yNeThMIjD}pd9+q=j<_inh0$>mIzWVY+Z9p<{D^#0Xk+b_@eNSiR8;KzSZ#7lUsk~NGMcB8C2c=m2l5paHPq`q{S(kdA7Z1a zyfk2Y;w?^t`?@yC5Pz9&pzo}Hc#}mLgDmhKV|PJ3lKOY(Km@Fi2AV~CuET*YfUi}u 
zfInZnqDX(<#vaS<^fszuR=l)AbqG{}9{rnyx?PbZz3Pyu!eSJK`uwkJU!ORQXy4x83r!PNgOyD33}}L=>xX_93l6njNTuqL8J{l%*3FVn3MG4&Fv*`lBXZ z?=;kn6HTT^#SrPX-N)4EZiIZI!0ByXTWy;;J-Tht{jq1mjh`DSy7yGjHxIaY%*sTx zuy9#9CqE#qi>1misx=KRWm=qx4rk|}vd+LMY3M`ow8)}m$3Ggv&)Ri*ON+}<^P%T5 z_7JPVPfdM=Pv-oH<tecoE}(0O7|YZc*d8`Uv_M*3Rzv7$yZnJE6N_W=AQ3_BgU_TjA_T?a)U1csCmJ&YqMp-lJe`y6>N zt++Bi;ZMOD%%1c&-Q;bKsYg!SmS^#J@8UFY|G3!rtyaTFb!5@e(@l?1t(87ln8rG? z--$1)YC~vWnXiW3GXm`FNSyzu!m$qT=Eldf$sMl#PEfGmzQs^oUd=GIQfj(X=}dw+ zT*oa0*oS%@cLgvB&PKIQ=Ok?>x#c#dC#sQifgMwtAG^l3D9nIg(Zqi;D%807TtUUCL3_;kjyte#cAg?S%e4S2W>9^A(uy8Ss0Tc++ZTjJw1 z&Em2g!3lo@LlDyri(P^I8BPpn$RE7n*q9Q-c^>rfOMM6Pd5671I=ZBjAvpj8oIi$! zl0exNl(>NIiQpX~FRS9UgK|0l#s@#)p4?^?XAz}Gjb1?4Qe4?j&cL$C8u}n)?A@YC zfmbSM`Hl5pQFwv$CQBF=_$Sq zxsV?BHI5bGZTk?B6B&KLdIN-40S426X3j_|ceLla*M3}3gx3(_7MVY1++4mzhH#7# zD>2gTHy*%i$~}mqc#gK83288SKp@y3wz1L_e8fF$Rb}ex+`(h)j}%~Ld^3DUZkgez zOUNy^%>>HHE|-y$V@B}-M|_{h!vXpk01xaD%{l{oQ|~+^>rR*rv9iQen5t?{BHg|% zR`;S|KtUb!X<22RTBA4AAUM6#M?=w5VY-hEV)b`!y1^mPNEoy2K)a>OyA?Q~Q*&(O zRzQI~y_W=IPi?-OJX*&&8dvY0zWM2%yXdFI!D-n@6FsG)pEYdJbuA`g4yy;qrgR?G z8Mj7gv1oiWq)+_$GqqQ$(ZM@#|0j7})=#$S&hZwdoijFI4aCFLVI3tMH5fLreZ;KD zqA`)0l~D2tuIBYOy+LGw&hJ5OyE+@cnZ0L5+;yo2pIMdt@4$r^5Y!x7nHs{@>|W(MzJjATyWGNwZ^4j+EPU0RpAl-oTM@u{lx*i0^yyWPfHt6QwPvYpk9xFMWfBFt!+Gu6TlAmr zeQ#PX71vzN*_-xh&__N`IXv6`>CgV#eA_%e@7wjgkj8jlKzO~Ic6g$cT`^W{R{606 zCDP~+NVZ6DMO$jhL~#+!g*$T!XW63#(ngDn#Qwy71yj^gazS{e;3jGRM0HedGD@pt z?(ln3pCUA(ekqAvvnKy0G@?-|-dh=eS%4Civ&c}s%wF@0K5Bltaq^2Os1n6Z3%?-Q zAlC4goQ&vK6TpgtzkHVt*1!tBYt-`|5HLV1V7*#45Vb+GACuU+QB&hZ=N_flPy0TY zR^HIrdskB#<$aU;HY(K{a3(OQa$0<9qH(oa)lg@Uf>M5g2W0U5 zk!JSlhrw8quBx9A>RJ6}=;W&wt@2E$7J=9SVHsdC?K(L(KACb#z)@C$xXD8^!7|uv zZh$6fkq)aoD}^79VqdJ!Nz-8$IrU(_-&^cHBI;4 z^$B+1aPe|LG)C55LjP;jab{dTf$0~xbXS9!!QdcmDYLbL^jvxu2y*qnx2%jbL%rB z{aP85qBJe#(&O~Prk%IJARcdEypZ)vah%ZZ%;Zk{eW(U)Bx7VlzgOi8)x z`rh4l`@l_Ada7z&yUK>ZF;i6YLGwI*Sg#Fk#Qr0Jg&VLax(nNN$u-XJ5=MsP3|(lEdIOJ7|(x3iY;ea)5#BW*mDV%^=8qOeYO&gIdJVuLLN3cFaN=xZtFB=b 
zH{l)PZl_j^u+qx@89}gAQW7ofb+k)QwX=aegihossZq*+@PlCpb$rpp>Cbk9UJO<~ zDjlXQ_Ig#W0zdD3&*ei(FwlN#3b%FSR%&M^ywF@Fr>d~do@-kIS$e%wkIVfJ|Ohh=zc zF&Rnic^|>@R%v?@jO}a9;nY3Qrg_!xC=ZWUcYiA5R+|2nsM*$+c$TOs6pm!}Z}dfM zGeBhMGWw3$6KZXav^>YNA=r6Es>p<6HRYcZY)z{>yasbC81A*G-le8~QoV;rtKnkx z;+os8BvEe?0A6W*a#dOudsv3aWs?d% z0oNngyVMjavLjtjiG`!007#?62ClTqqU$@kIY`=x^$2e>iqIy1>o|@Tw@)P)B8_1$r#6>DB_5 zmaOaoE~^9TolgDgooKFuEFB#klSF%9-~d2~_|kQ0Y{Ek=HH5yq9s zDq#1S551c`kSiWPZbweN^A4kWiP#Qg6er1}HcKv{fxb1*BULboD0fwfaNM_<55>qM zETZ8TJDO4V)=aPp_eQjX%||Ud<>wkIzvDlpNjqW>I}W!-j7M^TNe5JIFh#-}zAV!$ICOju8Kx)N z0vLtzDdy*rQN!7r>Xz7rLw8J-(GzQlYYVH$WK#F`i_i^qVlzTNAh>gBWKV@XC$T-` z3|kj#iCquDhiO7NKum07i|<-NuVsX}Q}mIP$jBJDMfUiaWR3c|F_kWBMw0_Sr|6h4 zk`_r5=0&rCR^*tOy$A8K;@|NqwncjZ>Y-75vlpxq%Cl3EgH`}^^~=u zoll6xxY@a>0f%Ddpi;=cY}fyG!K2N-dEyXXmUP5u){4VnyS^T4?pjN@Ot4zjL(Puw z_U#wMH2Z#8Pts{olG5Dy0tZj;N@;fHheu>YKYQU=4Bk|wcD9MbA`3O4bj$hNRHwzb zSLcG0SLV%zywdbuwl(^E_!@&)TdXge4O{MRWk2RKOt@!8E{$BU-AH(@4{gxs=YAz9LIob|Hzto0}9cWoz6Tp2x0&xi#$ zHh$dwO&UCR1Ob2w00-2eG7d4=cN(Y>0R#$q8?||q@iTi+7-w-xR%uMr&StFIthC<# zvK(aPduwuNB}oJUV8+Zl)%cnfsHI%4`;x6XW^UF^e4s3Z@S<&EV8?56Wya;HNs0E> z`$0dgRdiUz9RO9Au3RmYq>K#G=X%*_dUbSJHP`lSfBaN8t-~@F>)BL1RT*9I851A3 z<-+Gb#_QRX>~av#Ni<#zLswtu-c6{jGHR>wflhKLzC4P@b%8&~u)fosoNjk4r#GvC zlU#UU9&0Hv;d%g72Wq?Ym<&&vtA3AB##L}=ZjiTR4hh7J)e>ei} zt*u+>h%MwN`%3}b4wYpV=QwbY!jwfIj#{me)TDOG`?tI!%l=AwL2G@9I~}?_dA5g6 zCKgK(;6Q0&P&K21Tx~k=o6jwV{dI_G+Ba*Zts|Tl6q1zeC?iYJTb{hel*x>^wb|2RkHkU$!+S4OU4ZOKPZjV>9OVsqNnv5jK8TRAE$A&^yRwK zj-MJ3Pl?)KA~fq#*K~W0l4$0=8GRx^9+?w z!QT8*-)w|S^B0)ZeY5gZPI2G(QtQf?DjuK(s^$rMA!C%P22vynZY4SuOE=wX2f8$R z)A}mzJi4WJnZ`!bHG1=$lwaxm!GOnRbR15F$nRC-M*H<*VfF|pQw(;tbSfp({>9^5 zw_M1-SJ9eGF~m(0dvp*P8uaA0Yw+EkP-SWqu zqal$hK8SmM7#Mrs0@OD+%_J%H*bMyZiWAZdsIBj#lkZ!l2c&IpLu(5^T0Ge5PHzR} zn;TXs$+IQ_&;O~u=Jz+XE0wbOy`=6>m9JVG} zJ~Kp1e5m?K3x@@>!D)piw^eMIHjD4RebtR`|IlckplP1;r21wTi8v((KqNqn%2CB< zifaQc&T}*M&0i|LW^LgdjIaX|o~I$`owHolRqeH_CFrqCUCleN130&vH}dK|^kC>) 
z-r2P~mApHotL4dRX$25lIcRh_*kJaxi^%ZN5-GAAMOxfB!6flLPY-p&QzL9TE%ho( zRwftE3sy5<*^)qYzKkL|rE>n@hyr;xPqncY6QJ8125!MWr`UCWuC~A#G1AqF1@V$kv>@NBvN&2ygy*{QvxolkRRb%Ui zsmKROR%{*g*WjUUod@@cS^4eF^}yQ1>;WlGwOli z+Y$(8I`0(^d|w>{eaf!_BBM;NpCoeem2>J}82*!em=}}ymoXk>QEfJ>G(3LNA2-46 z5PGvjr)Xh9>aSe>vEzM*>xp{tJyZox1ZRl}QjcvX2TEgNc^(_-hir@Es>NySoa1g^ zFow_twnHdx(j?Q_3q51t3XI7YlJ4_q&(0#)&a+RUy{IcBq?)eaWo*=H2UUVIqtp&lW9JTJiP&u zw8+4vo~_IJXZIJb_U^&=GI1nSD%e;P!c{kZALNCm5c%%oF+I3DrA63_@4)(v4(t~JiddILp7jmoy+>cD~ivwoctFfEL zP*#2Rx?_&bCpX26MBgp^4G>@h`Hxc(lnqyj!*t>9sOBcXN(hTwEDpn^X{x!!gPX?1 z*uM$}cYRwHXuf+gYTB}gDTcw{TXSOUU$S?8BeP&sc!Lc{{pEv}x#ELX>6*ipI1#>8 zKes$bHjiJ1OygZge_ak^Hz#k;=od1wZ=o71ba7oClBMq>Uk6hVq|ePPt)@FM5bW$I z;d2Or@wBjbTyZj|;+iHp%Bo!Vy(X3YM-}lasMItEV_QrP-Kk_J4C>)L&I3Xxj=E?| zsAF(IfVQ4w+dRRnJ>)}o^3_012YYgFWE)5TT=l2657*L8_u1KC>Y-R{7w^S&A^X^U}h20jpS zQsdeaA#WIE*<8KG*oXc~$izYilTc#z{5xhpXmdT-YUnGh9v4c#lrHG6X82F2-t35} zB`jo$HjKe~E*W$=g|j&P>70_cI`GnOQ;Jp*JK#CT zuEGCn{8A@bC)~0%wsEv?O^hSZF*iqjO~_h|>xv>PO+?525Nw2472(yqS>(#R)D7O( zg)Zrj9n9$}=~b00=Wjf?E418qP-@8%MQ%PBiCTX=$B)e5cHFDu$LnOeJ~NC;xmOk# z>z&TbsK>Qzk)!88lNI8fOE2$Uxso^j*1fz>6Ot49y@=po)j4hbTIcVR`ePHpuJSfp zxaD^Dn3X}Na3@<_Pc>a;-|^Pon(>|ytG_+U^8j_JxP=_d>L$Hj?|0lz>_qQ#a|$+( z(x=Lipuc8p4^}1EQhI|TubffZvB~lu$zz9ao%T?%ZLyV5S9}cLeT?c} z>yCN9<04NRi~1oR)CiBakoNhY9BPnv)kw%*iv8vdr&&VgLGIs(-FbJ?d_gfbL2={- zBk4lkdPk~7+jIxd4{M(-W1AC_WcN&Oza@jZoj zaE*9Y;g83#m(OhA!w~LNfUJNUuRz*H-=$s*z+q+;snKPRm9EptejugC-@7-a-}Tz0 z@KHra#Y@OXK+KsaSN9WiGf?&jlZ!V7L||%KHP;SLksMFfjkeIMf<1e~t?!G3{n)H8 zQAlFY#QwfKuj;l@<$YDATAk;%PtD%B(0<|8>rXU< zJ66rkAVW_~Dj!7JGdGGi4NFuE?7ZafdMxIh65Sz7yQoA7fBZCE@WwysB=+`kT^LFX zz8#FlSA5)6FG9(qL3~A24mpzL@@2D#>0J7mMS1T*9UJ zvOq!!a(%IYY69+h45CE?(&v9H4FCr>gK0>mK~F}5RdOuH2{4|}k@5XpsX7+LZo^Qa4sH5`eUj>iffoBVm+ zz4Mtf`h?NW$*q1yr|}E&eNl)J``SZvTf6Qr*&S%tVv_OBpbjnA0&Vz#(;QmGiq-k! 
zgS0br4I&+^2mgA15*~Cd00cXLYOLA#Ep}_)eED>m+K@JTPr_|lSN}(OzFXQSBc6fM z@f-%2;1@BzhZa*LFV z-LrLmkmB%<<&jEURBEW>soaZ*rSIJNwaV%-RSaCZi4X)qYy^PxZ=oL?6N-5OGOMD2 z;q_JK?zkwQ@b3~ln&sDtT5SpW9a0q+5Gm|fpVY2|zqlNYBR}E5+ahgdj!CvK$Tlk0 z9g$5N;aar=CqMsudQV>yb4l@hN(9Jcc=1(|OHsqH6|g=K-WBd8GxZ`AkT?OO z-z_Ued-??Z*R4~L7jwJ%-`s~FK|qNAJ;EmIVDVpk{Lr7T4l{}vL)|GuUuswe9c5F| zv*5%u01hlv08?00Vpwyk*Q&&fY8k6MjOfpZfKa@F-^6d=Zv|0@&4_544RP5(s|4VPVP-f>%u(J@23BHqo2=zJ#v9g=F!cP((h zpt0|(s++ej?|$;2PE%+kc6JMmJjDW)3BXvBK!h!E`8Y&*7hS{c_Z?4SFP&Y<3evqf z9-ke+bSj$%Pk{CJlJbWwlBg^mEC^@%Ou?o>*|O)rl&`KIbHrjcpqsc$Zqt0^^F-gU2O=BusO+(Op}!jNzLMc zT;0YT%$@ClS%V+6lMTfhuzzxomoat=1H?1$5Ei7&M|gxo`~{UiV5w64Np6xV zVK^nL$)#^tjhCpTQMspXI({TW^U5h&Wi1Jl8g?P1YCV4=%ZYyjSo#5$SX&`r&1PyC zzc;uzCd)VTIih|8eNqFNeBMe#j_FS6rq81b>5?aXg+E#&$m++Gz9<+2)h=K(xtn}F ziV{rmu+Y>A)qvF}ms}4X^Isy!M&1%$E!rTO~5(p+8{U6#hWu>(Ll1}eD64Xa>~73A*538wry?v$vW z>^O#FRdbj(k0Nr&)U`Tl(4PI*%IV~;ZcI2z&rmq=(k^}zGOYZF3b2~Klpzd2eZJl> zB=MOLwI1{$RxQ7Y4e30&yOx?BvAvDkTBvWPpl4V8B7o>4SJn*+h1Ms&fHso%XLN5j z-zEwT%dTefp~)J_C8;Q6i$t!dnlh-!%haR1X_NuYUuP-)`IGWjwzAvp!9@h`kPZhf zwLwFk{m3arCdx8rD~K2`42mIN4}m%OQ|f)4kf%pL?Af5Ul<3M2fv>;nlhEPR8b)u} zIV*2-wyyD%%) zl$G@KrC#cUwoL?YdQyf9WH)@gWB{jd5w4evI& zOFF)p_D8>;3-N1z6mES!OPe>B^<;9xsh)){Cw$Vs-ez5nXS95NOr3s$IU;>VZSzKn zBvub8_J~I%(DozZW@{)Vp37-zevxMRZ8$8iRfwHmYvyjOxIOAF2FUngKj289!(uxY zaClWm!%x&teKmr^ABrvZ(ikx{{I-lEzw5&4t3P0eX%M~>$wG0ZjA4Mb&op+0$#SO_ z--R`>X!aqFu^F|a!{Up-iF(K+alKB{MNMs>e(i@Tpy+7Z-dK%IEjQFO(G+2mOb@BO zP>WHlS#fSQm0et)bG8^ZDScGnh-qRKIFz zfUdnk=m){ej0i(VBd@RLtRq3Ep=>&2zZ2%&vvf?Iex01hx1X!8U+?>ER;yJlR-2q4 z;Y@hzhEC=d+Le%=esE>OQ!Q|E%6yG3V_2*uh&_nguPcZ{q?DNq8h_2ahaP6=pP-+x zK!(ve(yfoYC+n(_+chiJ6N(ZaN+XSZ{|H{TR1J_s8x4jpis-Z-rlRvRK#U%SMJ(`C z?T2 zF(NNfO_&W%2roEC2j#v*(nRgl1X)V-USp-H|CwFNs?n@&vpRcj@W@xCJwR6@T!jt377?XjZ06=`d*MFyTdyvW!`mQm~t3luzYzvh^F zM|V}rO>IlBjZc}9Z zd$&!tthvr>5)m;5;96LWiAV0?t)7suqdh0cZis`^Pyg@?t>Ms~7{nCU;z`Xl+raSr zXpp=W1oHB*98s!Tpw=R5C)O{{Inl>9l7M*kq%#w9a$6N~v?BY2GKOVRkXYCgg*d

<5G2M1WZP5 zzqSuO91lJod(SBDDw<*sX(+F6Uq~YAeYV#2A;XQu_p=N5X+#cmu19Qk>QAnV=k!?wbk5I;tDWgFc}0NkvC*G=V+Yh1cyeJVq~9czZiDXe+S=VfL2g`LWo8om z$Y~FQc6MFjV-t1Y`^D9XMwY*U_re2R?&(O~68T&D4S{X`6JYU-pz=}ew-)V0AOUT1 zVOkHAB-8uBcRjLvz<9HS#a@X*Kc@|W)nyiSgi|u5$Md|P()%2(?olGg@ypoJwp6>m z*dnfjjWC>?_1p;%1brqZyDRR;8EntVA92EJ3ByOxj6a+bhPl z;a?m4rQAV1@QU^#M1HX)0+}A<7TCO`ZR_RzF}X9-M>cRLyN4C+lCk2)kT^3gN^`IT zNP~fAm(wyIoR+l^lQDA(e1Yv}&$I!n?&*p6?lZcQ+vGLLd~fM)qt}wsbf3r=tmVYe zl)ntf#E!P7wlakP9MXS7m0nsAmqxZ*)#j;M&0De`oNmFgi$ov#!`6^4)iQyxg5Iuj zjLAhzQ)r`^hf7`*1`Rh`X;LVBtDSz@0T?kkT1o!ijeyTGt5vc^Cd*tmNgiNo^EaWvaC8$e+nb_{W01j3%=1Y&92YacjCi>eNbwk%-gPQ@H-+4xskQ}f_c=jg^S-# zYFBDf)2?@5cy@^@FHK5$YdAK9cI;!?Jgd}25lOW%xbCJ>By3=HiK@1EM+I46A)Lsd zeT|ZH;KlCml=@;5+hfYf>QNOr^XNH%J-lvev)$Omy8MZ`!{`j>(J5cG&ZXXgv)TaF zg;cz99i$4CX_@3MIb?GL0s*8J=3`#P(jXF(_(6DXZjc@(@h&=M&JG)9&Te1?(^XMW zjjC_70|b=9hB6pKQi`S^Ls7JyJw^@P>Ko^&q8F&?>6i;#CbxUiLz1ZH4lNyd@QACd zu>{!sqjB!2Dg}pbAXD>d!3jW}=5aN0b;rw*W>*PAxm7D)aw(c*RX2@bTGEI|RRp}vw7;NR2wa;rXN{L{Q#=Fa z$x@ms6pqb>!8AuV(prv>|aU8oWV={C&$c zMa=p=CDNOC2tISZcd8~18GN5oTbKY+Vrq;3_obJlfSKRMk;Hdp1`y`&LNSOqeauR_ z^j*Ojl3Ohzb5-a49A8s|UnM*NM8tg}BJXdci5%h&;$afbmRpN0&~9rCnBA`#lG!p zc{(9Y?A0Y9yo?wSYn>iigf~KP$0*@bGZ>*YM4&D;@{<%Gg5^uUJGRrV4 z(aZOGB&{_0f*O=Oi0k{@8vN^BU>s3jJRS&CJOl3o|BE{FAA&a#2YYiX3pZz@|Go-F z|Fly;7eX2OTs>R}<`4RwpHFs9nwh)B28*o5qK1Ge=_^w0m`uJOv!=&!tzt#Save(C zgKU=Bsgql|`ui(e1KVxR`?>Dx>(rD1$iWp&m`v)3A!j5(6vBm*z|aKm*T*)mo(W;R zNGo2`KM!^SS7+*9YxTm6YMm_oSrLceqN*nDOAtagULuZl5Q<7mOnB@Hq&P|#9y{5B z!2x+2s<%Cv2Aa0+u{bjZXS);#IFPk(Ph-K7K?3i|4ro> zRbqJoiOEYo(Im^((r}U4b8nvo_>4<`)ut`24?ILnglT;Pd&U}$lV3U$F9#PD(O=yV zgNNA=GW|(E=&m_1;uaNmipQe?pon4{T=zK!N!2_CJL0E*R^XXIKf*wi!>@l}3_P9Z zF~JyMbW!+n-+>!u=A1ESxzkJy$DRuG+$oioG7(@Et|xVbJ#BCt;J43Nvj@MKvTxzy zMmjNuc#LXBxFAwIGZJk~^!q$*`FME}yKE8d1f5Mp}KHNq(@=Z8YxV}0@;YS~|SpGg$_jG7>_8WWYcVx#4SxpzlV9N4aO>K{c z$P?a_fyDzGX$Of3@ykvedGd<@-R;M^Shlj*SswJLD+j@hi_&_>6WZ}#AYLR0iWMK|A zH_NBeu(tMyG=6VO-=Pb>-Q#$F*or}KmEGg*-n?vWQREURdB#+6AvOj*I%!R-4E_2$ 
zU5n9m>RWs|Wr;h2DaO&mFBdDb-Z{APGQx$(L`if?C|njd*fC=rTS%{o69U|meRvu?N;Z|Y zbT|ojL>j;q*?xXmnHH#3R4O-59NV1j=uapkK7}6@Wo*^Nd#(;$iuGsb;H315xh3pl zHaJ>h-_$hdNl{+|Zb%DZH%ES;*P*v0#}g|vrKm9;j-9e1M4qX@zkl&5OiwnCz=tb6 zz<6HXD+rGIVpGtkb{Q^LIgExOm zz?I|oO9)!BOLW#krLmWvX5(k!h{i>ots*EhpvAE;06K|u_c~y{#b|UxQ*O@Ks=bca z^_F0a@61j3I(Ziv{xLb8AXQj3;R{f_l6a#H5ukg5rxwF9A$?Qp-Mo54`N-SKc}fWp z0T)-L@V$$&my;l#Ha{O@!fK4-FSA)L&3<${Hcwa7ue`=f&YsXY(NgeDU#sRlT3+9J z6;(^(sjSK@3?oMo$%L-nqy*E;3pb0nZLx6 z;h5)T$y8GXK1DS-F@bGun8|J(v-9o=42&nLJy#}M5D0T^5VWBNn$RpC zZzG6Bt66VY4_?W=PX$DMpKAI!d`INr) zkMB{XPQ<52rvWVQqgI0OL_NWxoe`xxw&X8yVftdODPj5|t}S6*VMqN$-h9)1MBe0N zYq?g0+e8fJCoAksr0af1)FYtz?Me!Cxn`gUx&|T;)695GG6HF7!Kg1zzRf_{VWv^bo81v4$?F6u2g|wxHc6eJQAg&V z#%0DnWm2Rmu71rPJ8#xFUNFC*V{+N_qqFH@gYRLZ6C?GAcVRi>^n3zQxORPG)$-B~ z%_oB?-%Zf7d*Fe;cf%tQwcGv2S?rD$Z&>QC2X^vwYjnr5pa5u#38cHCt4G3|efuci z@3z=#A13`+ztmp;%zjXwPY_aq-;isu*hecWWX_=Z8paSqq7;XYnUjK*T>c4~PR4W7 z#C*%_H&tfGx`Y$w7`dXvVhmovDnT>btmy~SLf>>~84jkoQ%cv=MMb+a{JV&t0+1`I z32g_Y@yDhKe|K^PevP~MiiVl{Ou7^Mt9{lOnXEQ`xY^6L8D$705GON{!1?1&YJEl#fTf5Z)da=yiEQ zGgtC-soFGOEBEB~ZF_{7b(76En>d}mI~XIwNw{e>=Fv)sgcw@qOsykWr?+qAOZSVrQfg}TNI ztKNG)1SRrAt6#Q?(me%)>&A_^DM`pL>J{2xu>xa$3d@90xR61TQDl@fu%_85DuUUA za9tn64?At;{`BAW6oykwntxHeDpXsV#{tmt5RqdN7LtcF4vR~_kZNT|wqyR#z^Xcd zFdymVRZvyLfTpBT>w9<)Ozv@;Yk@dOSVWbbtm^y@@C>?flP^EgQPAwsy75bveo=}T zFxl(f)s)j(0#N_>Or(xEuV(n$M+`#;Pc$1@OjXEJZumkaekVqgP_i}p`oTx;terTx zZpT+0dpUya2hqlf`SpXN{}>PfhajNk_J0`H|2<5E;U5Vh4F8er z;RxLSFgpGhkU>W?IwdW~NZTyOBrQ84H7_?gviIf71l`EETodG9a1!8e{jW?DpwjL? 
zGEM&eCzwoZt^P*8KHZ$B<%{I}>46IT%jJ3AnnB5P%D2E2Z_ z1M!vr#8r}1|KTqWA4%67ZdbMW2YJ81b(KF&SQ2L1Qn(y-=J${p?xLMx3W7*MK;LFQ z6Z`aU;;mTL4XrrE;HY*Rkh6N%?qviUGNAKiCB~!P}Z->IpO6E(gGd7I#eDuT7j|?nZ zK}I(EJ>$Kb&@338M~O+em9(L!+=0zBR;JAQesx|3?Ok90)D1aS9P?yTh6Poh8Cr4X zk3zc=f2rE7jj+aP7nUsr@~?^EGP>Q>h#NHS?F{Cn`g-gD<8F&dqOh-0sa%pfL`b+1 zUsF*4a~)KGb4te&K0}bE>z3yb8% zibb5Q%Sfiv7feb1r0tfmiMv z@^4XYwg@KZI=;`wC)`1jUA9Kv{HKe2t$WmRcR4y8)VAFjRi zaz&O7Y2tDmc5+SX(bj6yGHYk$dBkWc96u3u&F)2yEE~*i0F%t9Kg^L6MJSb&?wrXi zGSc;_rln$!^ybwYBeacEFRsVGq-&4uC{F)*Y;<0y7~USXswMo>j4?~5%Zm!m@i@-> zXzi82sa-vpU{6MFRktJy+E0j#w`f`>Lbog{zP|9~hg(r{RCa!uGe>Yl536cn$;ouH za#@8XMvS-kddc1`!1LVq;h57~zV`7IYR}pp3u!JtE6Q67 zq3H9ZUcWPm2V4IukS}MCHSdF0qg2@~ufNx9+VMjQP&exiG_u9TZAeAEj*jw($G)zL zq9%#v{wVyOAC4A~AF=dPX|M}MZV)s(qI9@aIK?Pe+~ch|>QYb+78lDF*Nxz2-vpRbtQ*F4$0fDbvNM#CCatgQ@z1+EZWrt z2dZfywXkiW=no5jus-92>gXn5rFQ-COvKyegmL=4+NPzw6o@a?wGE-1Bt;pCHe;34K%Z z-FnOb%!nH;)gX+!a3nCk?5(f1HaWZBMmmC@lc({dUah+E;NOros{?ui1zPC-Q0);w zEbJmdE$oU$AVGQPdm{?xxI_0CKNG$LbY*i?YRQ$(&;NiA#h@DCxC(U@AJ$Yt}}^xt-EC_ z4!;QlLkjvSOhdx!bR~W|Ezmuf6A#@T`2tsjkr>TvW*lFCMY>Na_v8+{Y|=MCu1P8y z89vPiH5+CKcG-5lzk0oY>~aJC_0+4rS@c@ZVKLAp`G-sJB$$)^4*A!B zmcf}lIw|VxV9NSoJ8Ag3CwN&d7`|@>&B|l9G8tXT^BDHOUPrtC70NgwN4${$k~d_4 zJ@eo6%YQnOgq$th?0{h`KnqYa$Nz@vlHw<%!C5du6<*j1nwquk=uY}B8r7f|lY+v7 zm|JU$US08ugor8E$h3wH$c&i~;guC|3-tqJy#T;v(g( zBZtPMSyv%jzf->435yM(-UfyHq_D=6;ouL4!ZoD+xI5uCM5ay2m)RPmm$I}h>()hS zO!0gzMxc`BPkUZ)WXaXam%1;)gedA7SM8~8yIy@6TPg!hR0=T>4$Zxd)j&P-pXeSF z9W`lg6@~YDhd19B9ETv(%er^Xp8Yj@AuFVR_8t*KS;6VHkEDKI#!@l!l3v6`W1`1~ zP{C@keuV4Q`Rjc08lx?zmT$e$!3esc9&$XZf4nRL(Z*@keUbk!GZi(2Bmyq*saOD? z3Q$V<*P-X1p2}aQmuMw9nSMbOzuASsxten7DKd6A@ftZ=NhJ(0IM|Jr<91uAul4JR zADqY^AOVT3a(NIxg|U;fyc#ZnSzw2cr}#a5lZ38>nP{05D)7~ad7JPhw!LqOwATXtRhK!w0X4HgS1i<%AxbFmGJx9?sEURV+S{k~g zGYF$IWSlQonq6}e;B(X(sIH|;52+(LYW}v_gBcp|x%rEAVB`5LXg_d5{Q5tMDu0_2 z|LOm$@K2?lrLNF=mr%YP|U-t)~9bqd+wHb4KuPmNK<}PK6e@aosGZK57=Zt+kcszVOSbe;`E^dN! 
ze7`ha3WUUU7(nS0{?@!}{0+-VO4A{7+nL~UOPW9_P(6^GL0h${SLtqG!} zKl~Ng5#@Sy?65wk9z*3SA`Dpd4b4T^@C8Fhd8O)k_4%0RZL5?#b~jmgU+0|DB%0Z) zql-cPC>A9HPjdOTpPC` zQwvF}uB5kG$Xr4XnaH#ruSjM*xG?_hT7y3G+8Ox`flzU^QIgb_>2&-f+XB6MDr-na zSi#S+c!ToK84<&m6sCiGTd^8pNdXo+$3^l3FL_E`0 z>8it5YIDxtTp2Tm(?}FX^w{fbfgh7>^8mtvN>9fWgFN_*a1P`Gz*dyOZF{OV7BC#j zQV=FQM5m>47xXgapI$WbPM5V`V<7J9tD)oz@d~MDoM`R^Y6-Na(lO~uvZlpu?;zw6 zVO1faor3dg#JEb5Q*gz4<W8tgC3nE2BG2jeIQs1)<{In&7hJ39x=;ih;CJDy)>0S1at*7n?Wr0ahYCpFjZ|@u91Zl7( zv;CSBRC65-6f+*JPf4p1UZ)k=XivKTX6_bWT~7V#rq0Xjas6hMO!HJN8GdpBKg_$B zwDHJF6;z?h<;GXFZan8W{XFNPpOj!(&I1`&kWO86p?Xz`a$`7qV7Xqev|7nn_lQuX ziGpU1MMYt&5dE2A62iX3;*0WzNB9*nSTzI%62A+N?f?;S>N@8M=|ef3gtQTIA*=yq zQAAjOqa!CkHOQo4?TsqrrsJLclXcP?dlAVv?v`}YUjo1Htt;6djP@NPFH+&p1I+f_ z)Y279{7OWomY8baT(4TAOlz1OyD{4P?(DGv3XyJTA2IXe=kqD)^h(@*E3{I~w;ws8 z)ZWv7E)pbEM zd3MOXRH3mQhks9 zv6{s;k0y5vrcjXaVfw8^>YyPo=oIqd5IGI{)+TZq5Z5O&hXAw%ZlL}^6FugH;-%vP zAaKFtt3i^ag226=f0YjzdPn6|4(C2sC5wHFX{7QF!tG1E-JFA`>eZ`}$ymcRJK?0c zN363o{&ir)QySOFY0vcu6)kX#;l??|7o{HBDVJN+17rt|w3;(C_1b>d;g9Gp=8YVl zYTtA52@!7AUEkTm@P&h#eg+F*lR zQ7iotZTcMR1frJ0*V@Hw__~CL>_~2H2cCtuzYIUD24=Cv!1j6s{QS!v=PzwQ(a0HS zBKx04KA}-Ue+%9d`?PG*hIij@54RDSQpA7|>qYVIrK_G6%6;#ZkR}NjUgmGju)2F`>|WJoljo)DJgZr4eo1k1i1+o z1D{>^RlpIY8OUaOEf5EBu%a&~c5aWnqM zxBpJq98f=%M^{4mm~5`CWl%)nFR64U{(chmST&2jp+-r z3675V<;Qi-kJud%oWnCLdaU-)xTnMM%rx%Jw6v@=J|Ir=4n-1Z23r-EVf91CGMGNz zb~wyv4V{H-hkr3j3WbGnComiqmS0vn?n?5v2`Vi>{Ip3OZUEPN7N8XeUtF)Ry6>y> zvn0BTLCiqGroFu|m2zG-;Xb6;W`UyLw)@v}H&(M}XCEVXZQoWF=Ykr5lX3XWwyNyF z#jHv)A*L~2BZ4lX?AlN3X#axMwOC)PoVy^6lCGse9bkGjb=qz%kDa6}MOmSwK`cVO zt(e*MW-x}XtU?GY5}9{MKhRhYOlLhJE5=ca+-RmO04^ z66z{40J=s=ey9OCdc(RCzy zd7Zr1%!y3}MG(D=wM_ebhXnJ@MLi7cImDkhm0y{d-Vm81j`0mbi4lF=eirlr)oW~a zCd?26&j^m4AeXEsIUXiTal)+SPM4)HX%%YWF1?(FV47BaA`h9m67S9x>hWMVHx~Hg z1meUYoLL(p@b3?x|9DgWeI|AJ`Ia84*P{Mb%H$ZRROouR4wZhOPX15=KiBMHl!^JnCt$Az`KiH^_d>cev&f zaG2>cWf$=A@&GP~DubsgYb|L~o)cn5h%2`i^!2)bzOTw2UR!>q5^r&2Vy}JaWFUQE04v>2;Z@ZPwXr?y&G(B^@&y 
zsd6kC=hHdKV>!NDLIj+3rgZJ|dF`%N$DNd;B)9BbiT9Ju^Wt%%u}SvfM^=|q-nxDG zuWCQG9e#~Q5cyf8@y76#kkR^}{c<_KnZ0QsZcAT|YLRo~&tU|N@BjxOuy`#>`X~Q< z?R?-Gsk$$!oo(BveQLlUrcL#eirhgBLh`qHEMg`+sR1`A=1QX7)ZLMRT+GBy?&mM8 zQG^z-!Oa&J-k7I(3_2#Q6Bg=NX<|@X&+YMIOzfEO2$6Mnh}YV!m!e^__{W@-CTprr zbdh3f=BeCD$gHwCrmwgM3LAv3!Mh$wM)~KWzp^w)Cu6roO7uUG5z*}i0_0j47}pK; ztN530`ScGatLOL06~zO)Qmuv`h!gq5l#wx(EliKe&rz-5qH(hb1*fB#B+q`9=jLp@ zOa2)>JTl7ovxMbrif`Xe9;+fqB1K#l=Dv!iT;xF zdkCvS>C5q|O;}ns3AgoE({Ua-zNT-9_5|P0iANmC6O76Sq_(AN?UeEQJ>#b54fi3k zFmh+P%b1x3^)0M;QxXLP!BZ^h|AhOde*{9A=f3|Xq*JAs^Y{eViF|=EBfS6L%k4ip zk+7M$gEKI3?bQg?H3zaE@;cyv9kv;cqK$VxQbFEsy^iM{XXW0@2|DOu$!-k zSFl}Y=jt-VaT>Cx*KQnHTyXt}f9XswFB9ibYh+k2J!ofO+nD?1iw@mwtrqI4_i?nE zhLkPp41ED62me}J<`3RN80#vjW;wt`pP?%oQ!oqy7`miL>d-35a=qotK$p{IzeSk# ze_$CFYp_zIkrPFVaW^s#U4xT1lI^A0IBe~Y<4uS%zSV=wcuLr%gQT=&5$&K*bwqx| zWzCMiz>7t^Et@9CRUm9E+@hy~sBpm9fri$sE1zgLU((1?Yg{N1Sars=DiW&~Zw=3I zi7y)&oTC?UWD2w97xQ&5vx zRXEBGeJ(I?Y}eR0_O{$~)bMJRTsNUPIfR!xU9PE7A>AMNr_wbrFK>&vVw=Y;RH zO$mlpmMsQ}-FQ2cSj7s7GpC+~^Q~dC?y>M}%!-3kq(F3hGWo9B-Gn02AwUgJ>Z-pKOaj zysJBQx{1>Va=*e@sLb2z&RmQ7ira;aBijM-xQ&cpR>X3wP^foXM~u1>sv9xOjzZpX z0K;EGouSYD~oQ&lAafj3~EaXfFShC+>VsRlEMa9cg9i zFxhCKO}K0ax6g4@DEA?dg{mo>s+~RPI^ybb^u--^nTF>**0l5R9pocwB?_K)BG_)S zyLb&k%XZhBVr7U$wlhMqwL)_r&&n%*N$}~qijbkfM|dIWP{MyLx}X&}ES?}7i;9bW zmTVK@zR)7kE2+L42Q`n4m0VVg5l5(W`SC9HsfrLZ=v%lpef=Gj)W59VTLe+Z$8T8i z4V%5+T0t8LnM&H>Rsm5C%qpWBFqgTwL{=_4mE{S3EnBXknM&u8n}A^IIM4$s3m(Rd z>zq=CP-!9p9es2C*)_hoL@tDYABn+o#*l;6@7;knWIyDrt5EuakO99S$}n((Fj4y} zD!VvuRzghcE{!s;jC*<_H$y6!6QpePo2A3ZbX*ZzRnQq*b%KK^NF^z96CHaWmzU@f z#j;y?X=UP&+YS3kZx7;{ zDA{9(wfz7GF`1A6iB6fnXu0?&d|^p|6)%3$aG0Uor~8o? 
z*e}u#qz7Ri?8Uxp4m_u{a@%bztvz-BzewR6bh*1Xp+G=tQGpcy|4V_&*aOqu|32CM zz3r*E8o8SNea2hYJpLQ-_}R&M9^%@AMx&`1H8aDx4j%-gE+baf2+9zI*+Pmt+v{39 zDZ3Ix_vPYSc;Y;yn68kW4CG>PE5RoaV0n@#eVmk?p$u&Fy&KDTy!f^Hy6&^-H*)#u zdrSCTJPJw?(hLf56%2;_3n|ujUSJOU8VPOTlDULwt0jS@j^t1WS z!n7dZIoT+|O9hFUUMbID4Ec$!cc($DuQWkocVRcYSikFeM&RZ=?BW)mG4?fh#)KVG zcJ!<=-8{&MdE)+}?C8s{k@l49I|Zwswy^ZN3;E!FKyglY~Aq?4m74P-0)sMTGXqd5(S<-(DjjM z&7dL-Mr8jhUCAG$5^mI<|%`;JI5FVUnNj!VO2?Jiqa|c2;4^n!R z`5KK0hyB*F4w%cJ@Un6GC{mY&r%g`OX|1w2$B7wxu97%<@~9>NlXYd9RMF2UM>(z0 zouu4*+u+1*k;+nFPk%ly!nuMBgH4sL5Z`@Rok&?Ef=JrTmvBAS1h?C0)ty5+yEFRz zY$G=coQtNmT@1O5uk#_MQM1&bPPnspy5#>=_7%WcEL*n$;sSAZcXxMpcXxLe;_mLA z5F_paad+bGZV*oh@8h0(|D2P!q# zTHjmiphJ=AazSeKQPkGOR-D8``LjzToyx{lfK-1CDD6M7?pMZOdLKFtjZaZMPk4}k zW)97Fh(Z+_Fqv(Q_CMH-YYi?fR5fBnz7KOt0*t^cxmDoIokc=+`o# zrud|^h_?KW=Gv%byo~(Ln@({?3gnd?DUf-j2J}|$Mk>mOB+1{ZQ8HgY#SA8END(Zw z3T+W)a&;OO54~m}ffemh^oZ!Vv;!O&yhL0~hs(p^(Yv=(3c+PzPXlS5W79Er8B1o* z`c`NyS{Zj_mKChj+q=w)B}K za*zzPhs?c^`EQ;keH{-OXdXJet1EsQ)7;{3eF!-t^4_Srg4(Ot7M*E~91gwnfhqaM zNR7dFaWm7MlDYWS*m}CH${o?+YgHiPC|4?X?`vV+ws&Hf1ZO-w@OGG^o4|`b{bLZj z&9l=aA-Y(L11!EvRjc3Zpxk7lc@yH1e$a}8$_-r$)5++`_eUr1+dTb@ zU~2P1HM#W8qiNN3b*=f+FfG1!rFxnNlGx{15}BTIHgxO>Cq4 z;#9H9YjH%>Z2frJDJ8=xq>Z@H%GxXosS@Z>cY9ppF+)e~t_hWXYlrO6)0p7NBMa`+ z^L>-#GTh;k_XnE)Cgy|0Dw;(c0* zSzW14ZXozu)|I@5mRFF1eO%JM=f~R1dkNpZM+Jh(?&Zje3NgM{2ezg1N`AQg5%+3Y z64PZ0rPq6;_)Pj-hyIOgH_Gh`1$j1!jhml7ksHA1`CH3FDKiHLz+~=^u@kUM{ilI5 z^FPiJ7mSrzBs9{HXi2{sFhl5AyqwUnU{sPcUD{3+l-ZHAQ)C;c$=g1bdoxeG(5N01 zZy=t8i{*w9m?Y>V;uE&Uy~iY{pY4AV3_N;RL_jT_QtLFx^KjcUy~q9KcLE3$QJ{!)@$@En{UGG7&}lc*5Kuc^780;7Bj;)X?1CSy*^^ zPP^M)Pr5R>mvp3_hmCtS?5;W^e@5BjE>Cs<`lHDxj<|gtOK4De?Sf0YuK5GX9G93i zMYB{8X|hw|T6HqCf7Cv&r8A$S@AcgG1cF&iJ5=%+x;3yB`!lQ}2Hr(DE8=LuNb~Vs z=FO&2pdc16nD$1QL7j+!U^XWTI?2qQKt3H8=beVTdHHa9=MiJ&tM1RRQ-=+vy!~iz zj3O{pyRhCQ+b(>jC*H)J)%Wq}p>;?@W*Eut@P&?VU+Sdw^4kE8lvX|6czf{l*~L;J zFm*V~UC;3oQY(ytD|D*%*uVrBB}BbAfjK&%S;z;7$w68(8PV_whC~yvkZmX)xD^s6 
z{$1Q}q;99W?*YkD2*;)tRCS{q2s@JzlO~<8x9}X<0?hCD5vpydvOw#Z$2;$@cZkYrp83J0PsS~!CFtY%BP=yxG?<@#{7%2sy zOc&^FJxsUYN36kSY)d7W=*1-{7ghPAQAXwT7z+NlESlkUH&8ODlpc8iC*iQ^MAe(B z?*xO4i{zFz^G=^G#9MsLKIN64rRJykiuIVX5~0#vAyDWc9-=6BDNT_aggS2G{B>dD ze-B%d3b6iCfc5{@yz$>=@1kdK^tX9qh0=ocv@9$ai``a_ofxT=>X7_Y0`X}a^M?d# z%EG)4@`^Ej_=%0_J-{ga!gFtji_byY&Vk@T1c|ucNAr(JNr@)nCWj?QnCyvXg&?FW;S-VOmNL6^km_dqiVjJuIASVGSFEos@EVF7St$WE&Z%)`Q##+0 zjaZ=JI1G@0!?l|^+-ZrNd$WrHBi)DA0-Eke>dp=_XpV<%CO_Wf5kQx}5e<90dt>8k zAi00d0rQ821nA>B4JHN7U8Zz=0;9&U6LOTKOaC1FC8GgO&kc=_wHIOGycL@c*$`ce703t%>S}mvxEnD-V!;6c`2(p74V7D0No1Xxt`urE66$0(ThaAZ1YVG#QP$ zy~NN%kB*zhZ2Y!kjn826pw4bh)75*e!dse+2Db(;bN34Uq7bLpr47XTX{8UEeC?2i z*{$`3dP}32${8pF$!$2Vq^gY|#w+VA_|o(oWmQX8^iw#n_crb(K3{69*iU?<%C-%H zuKi)3M1BhJ@3VW>JA`M>L~5*_bxH@Euy@niFrI$82C1}fwR$p2E&ZYnu?jlS}u7W9AyfdXh2pM>78bIt3 z)JBh&XE@zA!kyCDfvZ1qN^np20c1u#%P6;6tU&dx0phT1l=(mw7`u!-0e=PxEjDds z9E}{E!7f9>jaCQhw)&2TtG-qiD)lD(4jQ!q{`x|8l&nmtHkdul# zy+CIF8lKbp9_w{;oR+jSLtTfE+B@tOd6h=QePP>rh4@~!8c;Hlg9m%%&?e`*Z?qz5-zLEWfi>`ord5uHF-s{^bexKAoMEV@9nU z^5nA{f{dW&g$)BAGfkq@r5D)jr%!Ven~Q58c!Kr;*Li#`4Bu_?BU0`Y`nVQGhNZk@ z!>Yr$+nB=`z#o2nR0)V3M7-eVLuY`z@6CT#OTUXKnxZn$fNLPv7w1y7eGE=Qv@Hey`n;`U=xEl|q@CCV^#l)s0ZfT+mUf z^(j5r4)L5i2jnHW4+!6Si3q_LdOLQi<^fu?6WdohIkn79=jf%Fs3JkeXwF(?_tcF? 
z?z#j6iXEd(wJy4|p6v?xNk-)iIf2oX5^^Y3q3ziw16p9C6B;{COXul%)`>nuUoM*q zzmr|NJ5n)+sF$!yH5zwp=iM1#ZR`O%L83tyog-qh1I z0%dcj{NUs?{myT~33H^(%0QOM>-$hGFeP;U$puxoJ>>o-%Lk*8X^rx1>j|LtH$*)>1C!Pv&gd16%`qw5LdOIUbkNhaBBTo}5iuE%K&ZV^ zAr_)kkeNKNYJRgjsR%vexa~&8qMrQYY}+RbZ)egRg9_$vkoyV|Nc&MH@8L)`&rpqd zXnVaI@~A;Z^c3+{x=xgdhnocA&OP6^rr@rTvCnhG6^tMox$ulw2U7NgUtW%|-5VeH z_qyd47}1?IbuKtqNbNx$HR`*+9o=8`%vM8&SIKbkX9&%TS++x z5|&6P<%=F$C?owUI`%uvUq^yW0>`>yz!|WjzsoB9dT;2Dx8iSuK%%_XPgy0dTD4kd zDXF@&O_vBVVKQq(9YTClUPM30Sk7B!v7nOyV`XC!BA;BIVwphh+c)?5VJ^(C;GoQ$ zvBxr7_p*k$T%I1ke}`U&)$uf}I_T~#3XTi53OX)PoXVgxEcLJgZG^i47U&>LY(l%_ z;9vVDEtuMCyu2fqZeez|RbbIE7@)UtJvgAcVwVZNLccswxm+*L&w`&t=ttT=sv6Aq z!HouSc-24Y9;0q$>jX<1DnnGmAsP))- z^F~o99gHZw`S&Aw7e4id6Lg7kMk-e)B~=tZ!kE7sGTOJ)8@q}np@j7&7Sy{2`D^FH zI7aX%06vKsfJ168QnCM2=l|i>{I{%@gcr>ExM0Dw{PX6ozEuqFYEt z087%MKC;wVsMV}kIiuu9Zz9~H!21d!;Cu#b;hMDIP7nw3xSX~#?5#SSjyyg+Y@xh| z%(~fv3`0j#5CA2D8!M2TrG=8{%>YFr(j)I0DYlcz(2~92?G*?DeuoadkcjmZszH5& zKI@Lis%;RPJ8mNsbrxH@?J8Y2LaVjUIhRUiO-oqjy<&{2X~*f|)YxnUc6OU&5iac= z*^0qwD~L%FKiPmlzi&~a*9sk2$u<7Al=_`Ox^o2*kEv?p`#G(p(&i|ot8}T;8KLk- zPVf_4A9R`5^e`Om2LV*cK59EshYXse&IoByj}4WZaBomoHAPKqxRKbPcD`lMBI)g- zeMRY{gFaUuecSD6q!+b5(?vAnf>c`Z(8@RJy%Ulf?W~xB1dFAjw?CjSn$ph>st5bc zUac1aD_m6{l|$#g_v6;=32(mwpveQDWhmjR7{|B=$oBhz`7_g7qNp)n20|^^op3 zSfTdWV#Q>cb{CMKlWk91^;mHap{mk)o?udk$^Q^^u@&jd zfZ;)saW6{e*yoL6#0}oVPb2!}r{pAUYtn4{P~ES9tTfC5hXZnM{HrC8^=Pof{G4%Bh#8 ze~?C9m*|fd8MK;{L^!+wMy>=f^8b&y?yr6KnTq28$pFMBW9Oy7!oV5z|VM$s-cZ{I|Xf@}-)1=$V&x7e;9v81eiTi4O5-vs?^5pCKy2l>q);!MA zS!}M48l$scB~+Umz}7NbwyTn=rqt@`YtuwiQSMvCMFk2$83k50Q>OK5&fe*xCddIm)3D0I6vBU<+!3=6?(OhkO|b4fE_-j zimOzyfBB_*7*p8AmZi~X2bgVhyPy>KyGLAnOpou~sx9)S9%r)5dE%ADs4v%fFybDa_w*0?+>PsEHTbhKK^G=pFz z@IxLTCROWiKy*)cV3y%0FwrDvf53Ob_XuA1#tHbyn%Ko!1D#sdhBo`;VC*e1YlhrC z?*y3rp86m#qI|qeo8)_xH*G4q@70aXN|SP+6MQ!fJQqo1kwO_v7zqvUfU=Gwx`CR@ zRFb*O8+54%_8tS(ADh}-hUJzE`s*8wLI>1c4b@$al)l}^%GuIXjzBK!EWFO8W`>F^ ze7y#qPS0NI7*aU)g$_ziF(1ft;2<}6Hfz10cR8P}67FD=+}MfhrpOkF3hFhQu;Q1y 
zu%=jJHTr;0;oC94Hi@LAF5quAQ(rJG(uo%BiRQ@8U;nhX)j0i?0SL2g-A*YeAqF>RVCBOTrn{0R27vu}_S zS>tX4!#&U4W;ikTE!eFH+PKw%p+B(MR2I%n#+m0{#?qRP_tR@zpgCb=4rcrL!F=;A zh%EIF8m6%JG+qb&mEfuFTLHSxUAZEvC-+kvZKyX~SA3Umt`k}}c!5dy?-sLIM{h@> z!2=C)@nx>`;c9DdwZ&zeUc(7t<21D7qBj!|1^Mp1eZ6)PuvHx+poKSDCSBMFF{bKy z;9*&EyKitD99N}%mK8431rvbT+^%|O|HV23{;RhmS{$5tf!bIPoH9RKps`-EtoW5h zo6H_!s)Dl}2gCeGF6>aZtah9iLuGd19^z0*OryPNt{70RvJSM<#Ox9?HxGg04}b^f zrVEPceD%)#0)v5$YDE?f`73bQ6TA6wV;b^x*u2Ofe|S}+q{s5gr&m~4qGd!wOu|cZ||#h_u=k*fB;R6&k?FoM+c&J;ISg70h!J7*xGus)ta4veTdW)S^@sU@ z4$OBS=a~@F*V0ECic;ht4@?Jw<9kpjBgHfr2FDPykCCz|v2)`JxTH55?b3IM={@DU z!^|9nVO-R#s{`VHypWyH0%cs;0GO3E;It6W@0gX6wZ%W|Dzz&O%m17pa19db(er}C zUId1a4#I+Ou8E1MU$g=zo%g7K(=0Pn$)Rk z<4T2u<0rD)*j+tcy2XvY+0 z0d2pqm4)4lDewsAGThQi{2Kc3&C=|OQF!vOd#WB_`4gG3@inh-4>BoL!&#ij8bw7? zqjFRDaQz!J-YGitV4}$*$hg`vv%N)@#UdzHFI2E<&_@0Uw@h_ZHf}7)G;_NUD3@18 zH5;EtugNT0*RXVK*by>WS>jaDDfe!A61Da=VpIK?mcp^W?!1S2oah^wowRnrYjl~`lgP-mv$?yb6{{S55CCu{R z$9;`dyf0Y>uM1=XSl_$01Lc1Iy68IosWN8Q9Op=~I(F<0+_kKfgC*JggjxNgK6 z-3gQm6;sm?J&;bYe&(dx4BEjvq}b`OT^RqF$J4enP1YkeBK#>l1@-K`ajbn05`0J?0daOtnzh@l3^=BkedW1EahZlRp;`j*CaT;-21&f2wU z+Nh-gc4I36Cw+;3UAc<%ySb`#+c@5y ze~en&bYV|kn?Cn|@fqmGxgfz}U!98$=drjAkMi`43I4R%&H0GKEgx-=7PF}y`+j>r zg&JF`jomnu2G{%QV~Gf_-1gx<3Ky=Md9Q3VnK=;;u0lyTBCuf^aUi?+1+`4lLE6ZK zT#(Bf`5rmr(tgTbIt?yA@y`(Ar=f>-aZ}T~>G32EM%XyFvhn&@PWCm#-<&ApLDCXT zD#(9m|V(OOo7PmE@`vD4$S5;+9IQm19dd zvMEU`)E1_F+0o0-z>YCWqg0u8ciIknU#{q02{~YX)gc_u;8;i233D66pf(IkTDxeN zL=4z2)?S$TV9=ORVr&AkZMl<4tTh(v;Ix1{`pPVqI3n2ci&4Dg+W|N8TBUfZ*WeLF zqCH_1Q0W&f9T$lx3CFJ$o@Lz$99 zW!G&@zFHxTaP!o#z^~xgF|(vrHz8R_r9eo;TX9}2ZyjslrtH=%6O)?1?cL&BT(Amp zTGFU1%%#xl&6sH-UIJk_PGk_McFn7=%yd6tAjm|lnmr8bE2le3I~L{0(ffo}TQjyo zHZZI{-}{E4ohYTlZaS$blB!h$Jq^Rf#(ch}@S+Ww&$b);8+>g84IJcLU%B-W?+IY& zslcZIR>+U4v3O9RFEW;8NpCM0w1ROG84=WpKxQ^R`{=0MZCubg3st z48AyJNEvyxn-jCPTlTwp4EKvyEwD3e%kpdY?^BH0!3n6Eb57_L%J1=a*3>|k68A}v zaW`*4YitylfD}ua8V)vb79)N_Ixw_mpp}yJGbNu+5YYOP9K-7nf*jA1#<^rb4#AcS 
zKg%zCI)7cotx}L&J8Bqo8O1b0q;B1J#B5N5Z$Zq=wX~nQFgUfAE{@u0+EnmK{1hg> zC{vMfFLD;L8b4L+B51&LCm|scVLPe6h02rws@kGv@R+#IqE8>Xn8i|vRq_Z`V;x6F zNeot$1Zsu`lLS92QlLWF54za6vOEKGYQMdX($0JN*cjG7HP&qZ#3+bEN$8O_PfeAb z0R5;=zXac2IZ?fxu59?Nka;1lKm|;0)6|#RxkD05P5qz;*AL@ig!+f=lW5^Jbag%2 z%9@iM0ph$WFlxS!`p31t92z~TB}P-*CS+1Oo_g;7`6k(Jyj8m8U|Q3Sh7o-Icp4kV zK}%qri5>?%IPfamXIZ8pXbm-#{ytiam<{a5A+3dVP^xz!Pvirsq7Btv?*d7eYgx7q zWFxrzb3-%^lDgMc=Vl7^={=VDEKabTG?VWqOngE`Kt7hs236QKidsoeeUQ_^FzsXjprCDd@pW25rNx#6x&L6ZEpoX9Ffzv@olnH3rGOSW( zG-D|cV0Q~qJ>-L}NIyT?T-+x+wU%;+_GY{>t(l9dI%Ximm+Kmwhee;FK$%{dnF;C% zFjM2&$W68Sz#d*wtfX?*WIOXwT;P6NUw}IHdk|)fw*YnGa0rHx#paG!m=Y6GkS4VX zX`T$4eW9k1W!=q8!(#8A9h67fw))k_G)Q9~Q1e3f`aV@kbcSv7!priDUN}gX(iXTy zr$|kU0Vn%*ylmyDCO&G0Z3g>%JeEPFAW!5*H2Ydl>39w3W+gEUjL&vrRs(xGP{(ze zy7EMWF14@Qh>X>st8_029||TP0>7SG9on_xxeR2Iam3G~Em$}aGsNt$iES9zFa<3W zxtOF*!G@=PhfHO!=9pVPXMUVi30WmkPoy$02w}&6A7mF)G6-`~EVq5CwD2`9Zu`kd)52``#V zNSb`9dG~8(dooi1*-aSMf!fun7Sc`-C$-E(3BoSC$2kKrVcI!&yC*+ff2+C-@!AT_ zsvlAIV+%bRDfd{R*TMF><1&_a%@yZ0G0lg2K;F>7b+7A6pv3-S7qWIgx+Z?dt8}|S z>Qbb6x(+^aoV7FQ!Ph8|RUA6vXWQH*1$GJC+wXLXizNIc9p2yLzw9 z0=MdQ!{NnOwIICJc8!+Jp!zG}**r#E!<}&Te&}|B4q;U57$+pQI^}{qj669zMMe_I z&z0uUCqG%YwtUc8HVN7?0GHpu=bL7&{C>hcd5d(iFV{I5c~jpX&!(a{yS*4MEoYXh z*X4|Y@RVfn;piRm-C%b@{0R;aXrjBtvx^HO;6(>i*RnoG0Rtcd25BT6edxTNOgUAOjn zJ2)l{ipj8IP$KID2}*#F=M%^n&=bA0tY98@+2I+7~A&T-tw%W#3GV>GTmkHaqftl)#+E zMU*P(Rjo>8%P@_@#UNq(_L{}j(&-@1iY0TRizhiATJrnvwSH0v>lYfCI2ex^><3$q znzZgpW0JlQx?JB#0^^s-Js1}}wKh6f>(e%NrMwS`Q(FhazkZb|uyB@d%_9)_xb$6T zS*#-Bn)9gmobhAtvBmL+9H-+0_0US?g6^TOvE8f3v=z3o%NcPjOaf{5EMRnn(_z8- z$|m0D$FTU zDy;21v-#0i)9%_bZ7eo6B9@Q@&XprR&oKl4m>zIj-fiRy4Dqy@VVVs?rscG| zmzaDQ%>AQTi<^vYCmv#KOTd@l7#2VIpsj?nm_WfRZzJako`^uU%Nt3e;cU*y*|$7W zLm%fX#i_*HoUXu!NI$ey>BA<5HQB=|nRAwK!$L#n-Qz;~`zACig0PhAq#^5QS<8L2 zS3A+8%vbVMa7LOtTEM?55apt(DcWh#L}R^P2AY*c8B}Cx=6OFAdMPj1f>k3#^#+Hk z6uW1WJW&RlBRh*1DLb7mJ+KO>!t^t8hX1#_Wk`gjDio9)9IGbyCAGI4DJ~orK+YRv znjxRMtshZQHc$#Y-<-JOV6g^Cr@odj&Xw5B(FmI)*qJ9NHmIz_r{t)TxyB`L-%q5l 
ztzHgD;S6cw?7Atg*6E1!c6*gPRCb%t7D%z<(xm+K{%EJNiI2N0l8ud0Ch@_av_RW? zIr!nO4dL5466WslE6MsfMss7<)-S!e)2@r2o=7_W)OO`~CwklRWzHTfpB)_HYwgz=BzLhgZ9S<{nLBOwOIgJU=94uj6r!m>Xyn9>&xP+=5!zG_*yEoRgM0`aYts z^)&8(>z5C-QQ*o_s(8E4*?AX#S^0)aqB)OTyX>4BMy8h(cHjA8ji1PRlox@jB*1n? zDIfyDjzeg91Ao(;Q;KE@zei$}>EnrF6I}q&Xd=~&$WdDsyH0H7fJX|E+O~%LS*7^Q zYzZ4`pBdY{b7u72gZm6^5~O-57HwzwAz{)NvVaowo`X02tL3PpgLjwA`^i9F^vSpN zAqH3mRjG8VeJNHZ(1{%!XqC+)Z%D}58Qel{_weSEHoygT9pN@i zi=G;!Vj6XQk2tuJC>lza%ywz|`f7TIz*EN2Gdt!s199Dr4Tfd_%~fu8gXo~|ogt5Q zlEy_CXEe^BgsYM^o@L?s33WM14}7^T(kqohOX_iN@U?u;$l|rAvn{rwy>!yfZw13U zB@X9)qt&4;(C6dP?yRsoTMI!j-f1KC!<%~i1}u7yLXYn)(#a;Z6~r>hp~kfP));mi zcG%kdaB9H)z9M=H!f>kM->fTjRVOELNwh1amgKQT=I8J66kI)u_?0@$$~5f`u%;zl zC?pkr^p2Fe=J~WK%4ItSzKA+QHqJ@~m|Cduv=Q&-P8I5rQ-#G@bYH}YJr zUS(~(w|vKyU(T(*py}jTUp%I%{2!W!K(i$uvotcPjVddW z8_5HKY!oBCwGZcs-q`4Yt`Zk~>K?mcxg51wkZlX5e#B08I75F7#dgn5yf&Hrp`*%$ zQ;_Qg>TYRzBe$x=T(@WI9SC!ReSas9vDm(yslQjBJZde5z8GDU``r|N(MHcxNopGr z_}u39W_zwWDL*XYYt>#Xo!9kL#97|EAGyGBcRXtLTd59x%m=3i zL^9joWYA)HfL15l9%H?q`$mY27!<9$7GH(kxb%MV>`}hR4a?+*LH6aR{dzrX@?6X4 z3e`9L;cjqYb`cJmophbm(OX0b)!AFG?5`c#zLagzMW~o)?-!@e80lvk!p#&CD8u5_r&wp4O0zQ>y!k5U$h_K;rWGk=U)zX!#@Q%|9g*A zWx)qS1?fq6X<$mQTB$#3g;;5tHOYuAh;YKSBz%il3Ui6fPRv#v62SsrCdMRTav)Sg zTq1WOu&@v$Ey;@^+_!)cf|w_X<@RC>!=~+A1-65O0bOFYiH-)abINwZvFB;hJjL_$ z(9iScmUdMp2O$WW!520Hd0Q^Yj?DK%YgJD^ez$Z^?@9@Ab-=KgW@n8nC&88)TDC+E zlJM)L3r+ZJfZW_T$;Imq*#2<(j+FIk8ls7)WJ6CjUu#r5PoXxQs4b)mZza<8=v{o)VlLRM<9yw^0En#tXAj`Sylxvki{<1DPe^ zhjHwx^;c8tb?Vr$6ZB;$Ff$+3(*oinbwpN-#F)bTsXq@Sm?43MC#jQ~`F|twI=7oC zH4TJtu#;ngRA|Y~w5N=UfMZi?s0%ZmKUFTAye&6Y*y-%c1oD3yQ%IF2q2385Zl+=> zfz=o`Bedy|U;oxbyb^rB9ixG{Gb-{h$U0hVe`J;{ql!s_OJ_>>eoQn(G6h7+b^P48 zG<=Wg2;xGD-+d@UMZ!c;0>#3nws$9kIDkK13IfloGT@s14AY>&>>^#>`PT7GV$2Hp zN<{bN*ztlZu_%W=&3+=#3bE(mka6VoHEs~0BjZ$+=0`a@R$iaW)6>wp2w)=v2@|2d z%?34!+iOc5S@;AAC4hELWLH56RGxo4jw8MDMU0Wk2k_G}=Vo(>eRFo(g3@HjG|`H3 zm8b*dK=moM*oB<)*A$M9!!5o~4U``e)wxavm@O_R(`P|u%9^LGi(_%IF<6o;NLp*0 
zKsfZ0#24GT8(G`i4UvoMh$^;kOhl?`0yNiyrC#HJH=tqOH^T_d<2Z+ zeN>Y9Zn!X4*DMCK^o75Zk2621bdmV7Rx@AX^alBG4%~;G_vUoxhfhFRlR&+3WwF^T zaL)8xPq|wCZoNT^>3J0K?e{J-kl+hu2rZI>CUv#-z&u@`hjeb+bBZ>bcciQVZ{SbW zez04s9oFEgc8Z+Kp{XFX`MVf-s&w9*dx7wLen(_@y34}Qz@&`$2+osqfxz4&d}{Ql z*g1ag00Gu+$C`0avds{Q65BfGsu9`_`dML*rX~hyWIe$T>CsPRoLIr%MTk3pJ^2zH1qub1MBzPG}PO;Wmav9w%F7?%l=xIf#LlP`! z_Nw;xBQY9anH5-c8A4mME}?{iewjz(Sq-29r{fV;Fc>fv%0!W@(+{={Xl-sJ6aMoc z)9Q+$bchoTGTyWU_oI19!)bD=IG&OImfy;VxNXoIO2hYEfO~MkE#IXTK(~?Z&!ae! zl8z{D&2PC$Q*OBC(rS~-*-GHNJ6AC$@eve>LB@Iq;jbBZj`wk4|LGogE||Ie=M5g= z9d`uYQ1^Sr_q2wmZE>w2WG)!F%^KiqyaDtIAct?}D~JP4shTJy5Bg+-(EA8aXaxbd~BKMtTf2iQ69jD1o* zZF9*S3!v-TdqwK$%&?91Sh2=e63;X0Lci@n7y3XOu2ofyL9^-I767eHESAq{m+@*r zbVDx!FQ|AjT;!bYsXv8ilQjy~Chiu&HNhFXt3R_6kMC8~ChEFqG@MWu#1Q1#=~#ix zrkHpJre_?#r=N0wv`-7cHHqU`phJX2M_^{H0~{VP79Dv{6YP)oA1&TSfKPEPZn2)G z9o{U1huZBLL;Tp_0OYw@+9z(jkrwIGdUrOhKJUbwy?WBt zlIK)*K0lQCY0qZ!$%1?3A#-S70F#YyUnmJF*`xx?aH5;gE5pe-15w)EB#nuf6B*c~ z8Z25NtY%6Wlb)bUA$w%HKs5$!Z*W?YKV-lE0@w^{4vw;J>=rn?u!rv$&eM+rpU6rc=j9>N2Op+C{D^mospMCjF2ZGhe4eADA#skp2EA26%p3Ex9wHW8l&Y@HX z$Qv)mHM}4*@M*#*ll5^hE9M^=q~eyWEai*P;4z<9ZYy!SlNE5nlc7gm;M&Q zKhKE4d*%A>^m0R?{N}y|i6i^k>^n4(wzKvlQeHq{l&JuFD~sTsdhs`(?lFK@Q{pU~ zb!M3c@*3IwN1RUOVjY5>uT+s-2QLWY z4T2>fiSn>>Fob+%B868-v9D@AfWr#M8eM6w#eAlhc#zk6jkLxGBGk`E3$!A@*am!R zy>29&ptYK6>cvP`b!syNp)Q$0UOW|-O@)8!?94GOYF_}+zlW%fCEl|Tep_zx05g6q z>tp47e-&R*hSNe{6{H!mL?+j$c^TXT{C&@T-xIaesNCl05 z9SLb@q&mSb)I{VXMaiWa3PWj=Ed!>*GwUe;^|uk=Pz$njNnfFY^MM>E?zqhf6^{}0 zx&~~dA5#}1ig~7HvOQ#;d9JZBeEQ+}-~v$at`m!(ai z$w(H&mWCC~;PQ1$%iuz3`>dWeb3_p}X>L2LK%2l59Tyc}4m0>9A!8rhoU3m>i2+hl zx?*qs*c^j}+WPs>&v1%1Ko8_ivAGIn@QK7A`hDz-Emkcgv2@wTbYhkiwX2l=xz*XG zaiNg+j4F-I>9v+LjosI-QECrtKjp&0T@xIMKVr+&)gyb4@b3y?2CA?=ooN zT#;rU86WLh(e@#mF*rk(NV-qSIZyr z$6!ZUmzD)%yO-ot`rw3rp6?*_l*@Z*IB0xn4|BGPWHNc-1ZUnNSMWmDh=EzWJRP`) zl%d%J613oXzh5;VY^XWJi{lB`f#u+ThvtP7 zq(HK<4>tw(=yzSBWtYO}XI`S1pMBe3!jFxBHIuwJ(@%zdQFi1Q_hU2eDuHqXte7Ki zOV55H2D6u#4oTfr7|u*3p75KF&jaLEDpxk!4*bhPc%mpfj)Us3XIG3 
zIKMX^s^1wt8YK7Ky^UOG=w!o5e7W-<&c|fw2{;Q11vm@J{)@N3-p1U>!0~sKWHaL= zWV(0}1IIyt1p%=_-Fe5Kfzc71wg}`RDDntVZv;4!=&XXF-$48jS0Sc;eDy@Sg;+{A zFStc{dXT}kcIjMXb4F7MbX~2%i;UrBxm%qmLKb|2=?uPr00-$MEUIGR5+JG2l2Nq` zkM{{1RO_R)+8oQ6x&-^kCj)W8Z}TJjS*Wm4>hf+4#VJP)OBaDF%3pms7DclusBUw} z{ND#!*I6h85g6DzNvdAmnwWY{&+!KZM4DGzeHI?MR@+~|su0{y-5-nICz_MIT_#FE zm<5f3zlaKq!XyvY3H`9s&T};z!cK}G%;~!rpzk9-6L}4Rg7vXtKFsl}@sT#U#7)x- z7UWue5sa$R>N&b{J61&gvKcKlozH*;OjoDR+elkh|4bJ!_3AZNMOu?n9&|L>OTD78 z^i->ah_Mqc|Ev)KNDzfu1P3grBIM#%`QZqj5W{qu(HocQhjyS;UINoP`{J+DvV?|1 z_sw6Yr3z6%e7JKVDY<$P=M)dbk@~Yw9|2!Cw!io3%j92wTD!c^e9Vj+7VqXo3>u#= zv#M{HHJ=e$X5vQ>>ML?E8#UlmvJgTnb73{PSPTf*0)mcj6C z{KsfUbDK|F$E(k;ER%8HMdDi`=BfpZzP3cl5yJHu;v^o2FkHNk;cXc17tL8T!CsYI zfeZ6sw@;8ia|mY_AXjCS?kUfxdjDB28)~Tz1dGE|{VfBS9`0m2!m1yG?hR})er^pl4c@9Aq+|}ZlDaHL)K$O| z%9Jp-imI-Id0|(d5{v~w6mx)tUKfbuVD`xNt04Mry%M+jXzE>4(TBsx#&=@wT2Vh) z1yeEY&~17>0%P(eHP0HB^|7C+WJxQBTG$uyOWY@iDloRIb-Cf!p<{WQHR!422#F34 zG`v|#CJ^G}y9U*7jgTlD{D&y$Iv{6&PYG>{Ixg$pGk?lWrE#PJ8KunQC@}^6OP!|< zS;}p3to{S|uZz%kKe|;A0bL0XxPB&Q{J(9PyX`+Kr`k~r2}yP^ND{8!v7Q1&vtk& z2Y}l@J@{|2`oA%sxvM9i0V+8IXrZ4;tey)d;LZI70Kbim<4=WoTPZy=Yd|34v#$Kh zx|#YJ8s`J>W&jt#GcMpx84w2Z3ur-rK7gf-p5cE)=w1R2*|0mj12hvapuUWM0b~dG zMg9p8FmAZI@i{q~0@QuY44&mMUNXd7z>U58shA3o`p5eVLpq>+{(<3->DWuSFVZwC zxd50Uz(w~LxC4}bgag#q#NNokK@yNc+Q|Ap!u>Ddy+df>v;j@I12CDNN9do+0^n8p zMQs7X#+FVF0C5muGfN{r0|Nkql%BQT|K(DDNdR2pzM=_ea5+GO|J67`05AV92t@4l z0Qno0078PIHdaQGHZ~Scw!dzgqjK~3B7kf>BcP__&lLyU(cu3B^uLo%{j|Mb0NR)tkeT7Hcwp4O# z)yzu>cvG(d9~0a^)eZ;;%3ksk@F&1eEBje~ zW+-_s)&RgiweQc!otF>4%vbXKaOU41{!hw?|2`Ld3I8$&#WOsq>EG)1ANb!{N4z9@ zsU!bPG-~-bqCeIDzo^Q;gnucB{tRzm{ZH^Orphm2U+REA!*<*J6YQV83@&xoDl%#wnl5qcBqCcAF-vX5{30}(oJrnSH z{RY85hylK2dMOh2%oO1J8%)0?8TOL%rS8)+CsDv}aQ>4D)Jv+DLK)9gI^n-T^$)Tc zFPUD75qJm!Y-KBqj;JP4dV4 z`X{lGmn<)1IGz330}s}Jrjtf{(lnuuNHe5(ezA(pYa=1|Ff-LhPFK8 zyJh_b{yzu0yll6ZkpRzRjezyYivjyjW7QwO;@6X`m;2Apn2EK2!~7S}-*=;5*7K$B z`x(=!^?zgj(-`&ApZJXI09aDLXaT@<;CH=?fBOY5d|b~wBA@@p^K#nxr`)?i?SqTupI_PJ(A3cx`z~9mX_*)>L F{|7XC?P&l2 
literal 0 HcmV?d00001 diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..30b572c7 --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-6.0.1-all.zip diff --git a/gradlew b/gradlew new file mode 100644 index 00000000..cccdd3d5 --- /dev/null +++ b/gradlew @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. 
+if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + 
fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000..f9553162 --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem 
########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/module.gradle b/module.gradle new file mode 100644 index 00000000..83811406 --- /dev/null +++ b/module.gradle @@ -0,0 +1,23 @@ +ext { + moduleId = "il2cppdumper" // FIXME replace with yours + riruApiVersion = 7 + riruMinVersionName = "v21.2" + + moduleProp = [ + name : "Il2CppDumper", // FIXME replace with yours + version : "v1.0", // FIXME replace with yours + versionCode: "1", // FIXME replace with yours + author : "Perfare", // FIXME replace with yours + description: "Il2CppDumper Riru version.", // FIXME replace with yours + api : riruApiVersion + ] + + magiskModuleProp = [ + id : "riru-${moduleId.replace('_', '-')}", + name : "Riru - ${moduleProp['name']}", + version : moduleProp['version'], + versionCode: moduleProp['versionCode'], + author : moduleProp['author'], + description: moduleProp['description'] + ] +} \ No newline at end of file diff --git a/module/.gitignore b/module/.gitignore new file mode 100644 index 00000000..9833d4bf --- /dev/null +++ b/module/.gitignore @@ -0,0 +1,3 @@ +/.externalNativeBuild +/build +/release \ No newline at end of file diff --git a/module/build.gradle b/module/build.gradle new file mode 100644 index 00000000..862b5fa2 --- /dev/null +++ b/module/build.gradle @@ -0,0 +1,115 @@ +apply plugin: 'com.android.library' +apply from: file(rootProject.file('module.gradle')) + +android { + compileSdkVersion rootProject.ext.targetSdkVersion + defaultConfig { + minSdkVersion rootProject.ext.minSdkVersion + targetSdkVersion rootProject.ext.targetSdkVersion + externalNativeBuild { + cmake { + arguments "-DMODULE_NAME:STRING=riru_$moduleId" + } + } + } + externalNativeBuild { + cmake { + path "src/main/cpp/CMakeLists.txt" + version "3.10.2" + } + } +} + +def outDir = file("$rootDir/out") +def magiskDir = file("$outDir/magisk_module") +def zipName = "magisk-${magiskModuleProp['id'].replace('_', 
'-')}-${magiskModuleProp['version']}.zip" +def riruDir = "$magiskDir/riru" + +import java.nio.file.Files +import java.security.MessageDigest + +static def calcSha256(file) { + def md = MessageDigest.getInstance("SHA-256") + file.eachByte 4096, { bytes, size -> + md.update(bytes, 0, size); + } + return md.digest().encodeHex() +} + +static def renameOrFail(File from, File to) { + if (!from.renameTo(to)) { + throw new IOException("Unable reanme file $from to $to") + } +} + +import org.apache.tools.ant.filters.FixCrLfFilter + +android.libraryVariants.all { variant -> + def task = variant.assembleProvider.get() + task.doLast { + // clear + delete { delete magiskDir } + + // copy from template + copy { + from "$rootDir/template/magisk_module" + into magiskDir.path + exclude 'riru.sh' + } + // copy riru.sh + copy { + from "$rootDir/template/magisk_module" + into magiskDir.path + include 'riru.sh' + filter { line -> + line.replaceAll('%%%RIRU_MODULE_ID%%%', moduleId) + .replaceAll('%%%RIRU_MIN_API_VERSION%%%', riruApiVersion.toString()) + .replaceAll('%%%RIRU_MIN_VERSION_NAME%%%', riruMinVersionName) + } + filter(FixCrLfFilter.class, + eol: FixCrLfFilter.CrLf.newInstance("lf")) + } + // copy .git files manually since gradle exclude it by default + Files.copy(file("$rootDir/template/magisk_module/.gitattributes").toPath(), file("${magiskDir.path}/.gitattributes").toPath()) + + // generate module.prop + def modulePropText = "" + magiskModuleProp.each { k, v -> modulePropText += "$k=$v\n" } + modulePropText = modulePropText.trim() + file("$magiskDir/module.prop").text = modulePropText + + // generate module.prop for Riru + def riruModulePropText = "" + moduleProp.each { k, v -> riruModulePropText += "$k=$v\n" } + riruModulePropText = riruModulePropText.trim() + file(riruDir).mkdirs() + + // module.prop.new will be renamed to module.prop in post-fs-data.sh + file("$riruDir/module.prop.new").text = riruModulePropText + + // copy native files + def nativeOutDir = 
file("build/intermediates/cmake/$variant.name/obj") + + file("$magiskDir/system").mkdirs() + file("$magiskDir/system_x86").mkdirs() + renameOrFail(file("$nativeOutDir/arm64-v8a"), file("$magiskDir/system/lib64")) + renameOrFail(file("$nativeOutDir/armeabi-v7a"), file("$magiskDir/system/lib")) + renameOrFail(file("$nativeOutDir/x86_64"), file("$magiskDir/system_x86/lib64")) + renameOrFail(file("$nativeOutDir/x86"), file("$magiskDir/system_x86/lib")) + + // generate sha1sum + fileTree("$magiskDir").matching { + exclude "README.md", "META-INF" + }.visit { f -> + if (f.directory) return + file(f.file.path + ".sha256sum").text = calcSha256(f.file) + } + } + task.finalizedBy zipMagiskMoudle +} + +task zipMagiskMoudle(type: Zip) { + from magiskDir + archiveName zipName + destinationDir outDir +} \ No newline at end of file diff --git a/module/src/main/AndroidManifest.xml b/module/src/main/AndroidManifest.xml new file mode 100644 index 00000000..235c6578 --- /dev/null +++ b/module/src/main/AndroidManifest.xml @@ -0,0 +1 @@ + diff --git a/module/src/main/cpp/CMakeLists.txt b/module/src/main/cpp/CMakeLists.txt new file mode 100644 index 00000000..b9f2f5fd --- /dev/null +++ b/module/src/main/cpp/CMakeLists.txt @@ -0,0 +1,29 @@ +cmake_minimum_required(VERSION 3.4.1) + +if (NOT DEFINED MODULE_NAME) + message(FATAL_ERROR "MODULE_NAME is not set") +endif () + +message("Build type: ${CMAKE_BUILD_TYPE}") +add_subdirectory(whale) +set(CMAKE_CXX_STANDARD 11) + +set(LINKER_FLAGS "-ffixed-x18 -Wl,--hash-style=both") +set(C_FLAGS "-Werror=format -fdata-sections -ffunction-sections") + +if (CMAKE_BUILD_TYPE STREQUAL "Release") + set(C_FLAGS "${C_FLAGS} -O2 -fvisibility=hidden -fvisibility-inlines-hidden") + set(LINKER_FLAGS "${LINKER_FLAGS} -Wl,-exclude-libs,ALL -Wl,--gc-sections") +else () + set(C_FLAGS "${C_FLAGS} -O0") +endif () + +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${C_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${C_FLAGS}") + +set(CMAKE_SHARED_LINKER_FLAGS 
"${CMAKE_SHARED_LINKER_FLAGS} ${LINKER_FLAGS}") +set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${LINKER_FLAGS}") + +add_library(${MODULE_NAME} SHARED main.cpp hook.cpp il2cpp.cpp) +target_link_libraries(${MODULE_NAME} log whale) +set_target_properties(${MODULE_NAME} PROPERTIES LINK_FLAGS_RELEASE -s) diff --git a/module/src/main/cpp/game.h b/module/src/main/cpp/game.h new file mode 100644 index 00000000..cee6b183 --- /dev/null +++ b/module/src/main/cpp/game.h @@ -0,0 +1,36 @@ +// +// Created by Perfare on 2020/7/4. +// + +#ifndef RIRU_IL2CPPDUMPER_GAME_H +#define RIRU_IL2CPPDUMPER_GAME_H + +#define GamePackageName "com.game.packagename" +#define UnityVersion 2017.2.1f1 +// UnityVersion Compatible list +// <= 5.3.1f1 not support +// 5.3.2f1 | 5.3.2f1 | v19 +// 5.3.3f1 | 5.3.3f1 - 5.3.4f1 | v20 +// 5.3.5f1 | 5.3.5f1 | v21 +// 5.3.6f1 | 5.3.6f1 | v21 +// 5.3.7f1 | 5.3.7f1 - 5.3.8f2 | v21 +// 5.4.0f3 | 5.4.0f3 | v21 +// 5.4.1f1 | 5.4.1f1 - 5.4.3f1 | v21 +// 5.4.4f1 | 5.4.4f1 - 5.4.6f3 | v21 +// 5.5.0f3 | 5.5.0f3 | v22 +// 5.5.1f1 | 5.5.1f1 - 5.5.6f1 | v22 +// 5.6.0f3 | 5.6.0f3 - 5.6.7f1 | v23 +// 2017.1.0f3 | 2017.1.0f3 - 2017.1.2f1 | v24 +// 2017.1.3f1 | 2017.1.3f1 - 2017.1.5f1 | v24 +// 2017.2.0f3 | 2017.2.0f3 | v24 +// 2017.2.1f1 | 2017.2.1f1 - 2017.4.38f1 | v24 +// 2018.1.0f2 | 2018.1.0f2 - 2018.1.9f2 | v24 +// 2018.2.0f2 | 2018.2.0f2 - 2018.2.21f1 | v24 +// 2018.3.0f2 | 2018.3.0f2 - 2018.3.7f1 | v24.1 +// 2018.3.8f1 | 2018.3.8f1 - 2018.4.17f1 | v24.1 +// 2018.4.18f1 | 2018.4.18f1 - 2018.4.20f1 | v24.1 +// 2019.1.0f2 | 2019.1.0f2 - 2019.1.14f1 | v24.2 +// 2019.2.0f1 | 2019.2.0f1 - 2019.2.21f1 | v24.2 +// 2019.3.0f6 | 2019.3.0f6 - 2019.3.6f1 | v24.2 +// 2019.3.7f1 | 2019.3.7f1 - 2019.4.2f1 | v24.3 +#endif //RIRU_IL2CPPDUMPER_GAME_H diff --git a/module/src/main/cpp/hook.cpp b/module/src/main/cpp/hook.cpp new file mode 100644 index 00000000..3d7eaf42 --- /dev/null +++ b/module/src/main/cpp/hook.cpp @@ -0,0 +1,111 @@ +// +// Created by Perfare on 2020/7/4. 
+// + +#include "hook.h" +#include +#include +#include +#include +#include +#include +#include "il2cpp.h" +#include "game.h" + +int isGame(JNIEnv *env, jstring appDataDir) { + if (!appDataDir) + return 0; + const char *app_data_dir = env->GetStringUTFChars(appDataDir, NULL); + int user = 0; + static char package_name[256]; + if (sscanf(app_data_dir, "/data/%*[^/]/%d/%s", &user, package_name) != 2) { + if (sscanf(app_data_dir, "/data/%*[^/]/%s", package_name) != 1) { + package_name[0] = '\0'; + LOGW("can't parse %s", app_data_dir); + return 0; + } + } + if (strcmp(package_name, GamePackageName) == 0) { + LOGI("detect game: %s", package_name); + game_data_dir = new char[strlen(app_data_dir) + 1]; + strcpy(game_data_dir, app_data_dir); + env->ReleaseStringUTFChars(appDataDir, app_data_dir); + return 1; + } else { + env->ReleaseStringUTFChars(appDataDir, app_data_dir); + return 0; + } +} + +static int GetAndroidApiLevel() { + char prop_value[PROP_VALUE_MAX]; + __system_property_get("ro.build.version.sdk", prop_value); + return atoi(prop_value); +} + +void dlopen_process(const char *name, void *handle) { + //LOGD("dlopen: %s", name); + if (!il2cpp_handle) { + if (strstr(name, "libil2cpp.so")) { + il2cpp_handle = handle; + LOGI("Got il2cpp handle!"); + } + } +} + +HOOK_DEF(void*, __loader_dlopen, const char *filename, int flags, const void *caller_addr) { + void *handle = orig___loader_dlopen(filename, flags, caller_addr); + dlopen_process(filename, handle); + return handle; +} + +HOOK_DEF(void*, do_dlopen_V24, const char *name, int flags, const void *extinfo, + void *caller_addr) { + void *handle = orig_do_dlopen_V24(name, flags, extinfo, caller_addr); + dlopen_process(name, handle); + return handle; +} + +HOOK_DEF(void*, do_dlopen_V19, const char *name, int flags, const void *extinfo) { + void *handle = orig_do_dlopen_V19(name, flags, extinfo); + dlopen_process(name, handle); + return handle; +} + +void *hack_thread(void *arg) { + LOGI("hack thread: %d", gettid()); + 
int api_level = GetAndroidApiLevel(); + LOGI("api level: %d", api_level); + if (api_level > 25) { + void *libdl_handle = dlopen("libdl.so", RTLD_LAZY); + void *__loader_dlopen_addr = dlsym(libdl_handle, "__loader_dlopen"); + LOGI("__loader_dlopen at: %p", __loader_dlopen_addr); + WInlineHookFunction(__loader_dlopen_addr, (void *) new___loader_dlopen, + (void **) &orig___loader_dlopen); + } else { + void *linker_handle = WDynamicLibOpen(kLinkerPath); + if (api_level > 23) { + void *symbol = WDynamicLibSymbol(linker_handle, + "__dl__Z9do_dlopenPKciPK17android_dlextinfoPv"); + if (symbol) { + LOGI("do_dlopen at: %p", symbol); + WInlineHookFunction(symbol, (void *) new_do_dlopen_V24, + (void **) &orig_do_dlopen_V24); + } + } else { + void *symbol = WDynamicLibSymbol(linker_handle, + "__dl__Z9do_dlopenPKciPK17android_dlextinfo"); + if (symbol) { + LOGI("do_dlopen at: %p", symbol); + WInlineHookFunction(symbol, (void *) new_do_dlopen_V19, + (void **) &orig_do_dlopen_V19); + } + } + } + while (!il2cpp_handle) { + sleep(1); + } + sleep(2); + il2cpp_dump(il2cpp_handle, game_data_dir); + return NULL; +} \ No newline at end of file diff --git a/module/src/main/cpp/hook.h b/module/src/main/cpp/hook.h new file mode 100644 index 00000000..b7669955 --- /dev/null +++ b/module/src/main/cpp/hook.h @@ -0,0 +1,29 @@ +// +// Created by Perfare on 2020/7/4. +// + +#ifndef RIRU_IL2CPPDUMPER_HOOK_H +#define RIRU_IL2CPPDUMPER_HOOK_H + +#include <jni.h> +#include "log.h" + +static int enable_hack; +static void *il2cpp_handle = NULL; +static char *game_data_dir = NULL; + +int isGame(JNIEnv *env, jstring appDataDir); + +void *hack_thread(void *arg); + +#define HOOK_DEF(ret, func, ...) 
\ + ret (*orig_##func)(__VA_ARGS__); \ + ret new_##func(__VA_ARGS__) + +#ifdef __LP64__ +static constexpr const char *kLinkerPath = "/system/bin/linker64"; +#else +static constexpr const char *kLinkerPath = "/system/bin/linker"; +#endif + +#endif //RIRU_IL2CPPDUMPER_HOOK_H diff --git a/module/src/main/cpp/il2cpp-tabledefs.h b/module/src/main/cpp/il2cpp-tabledefs.h new file mode 100644 index 00000000..fbbef5f2 --- /dev/null +++ b/module/src/main/cpp/il2cpp-tabledefs.h @@ -0,0 +1,152 @@ +#pragma once + +/* + * Field Attributes (21.1.5). + */ + +#define FIELD_ATTRIBUTE_FIELD_ACCESS_MASK 0x0007 +#define FIELD_ATTRIBUTE_COMPILER_CONTROLLED 0x0000 +#define FIELD_ATTRIBUTE_PRIVATE 0x0001 +#define FIELD_ATTRIBUTE_FAM_AND_ASSEM 0x0002 +#define FIELD_ATTRIBUTE_ASSEMBLY 0x0003 +#define FIELD_ATTRIBUTE_FAMILY 0x0004 +#define FIELD_ATTRIBUTE_FAM_OR_ASSEM 0x0005 +#define FIELD_ATTRIBUTE_PUBLIC 0x0006 + +#define FIELD_ATTRIBUTE_STATIC 0x0010 +#define FIELD_ATTRIBUTE_INIT_ONLY 0x0020 +#define FIELD_ATTRIBUTE_LITERAL 0x0040 +#define FIELD_ATTRIBUTE_NOT_SERIALIZED 0x0080 +#define FIELD_ATTRIBUTE_SPECIAL_NAME 0x0200 +#define FIELD_ATTRIBUTE_PINVOKE_IMPL 0x2000 + +/* For runtime use only */ +#define FIELD_ATTRIBUTE_RESERVED_MASK 0x9500 +#define FIELD_ATTRIBUTE_RT_SPECIAL_NAME 0x0400 +#define FIELD_ATTRIBUTE_HAS_FIELD_MARSHAL 0x1000 +#define FIELD_ATTRIBUTE_HAS_DEFAULT 0x8000 +#define FIELD_ATTRIBUTE_HAS_FIELD_RVA 0x0100 + +/* + * Method Attributes (22.1.9) + */ + +#define METHOD_IMPL_ATTRIBUTE_CODE_TYPE_MASK 0x0003 +#define METHOD_IMPL_ATTRIBUTE_IL 0x0000 +#define METHOD_IMPL_ATTRIBUTE_NATIVE 0x0001 +#define METHOD_IMPL_ATTRIBUTE_OPTIL 0x0002 +#define METHOD_IMPL_ATTRIBUTE_RUNTIME 0x0003 + +#define METHOD_IMPL_ATTRIBUTE_MANAGED_MASK 0x0004 +#define METHOD_IMPL_ATTRIBUTE_UNMANAGED 0x0004 +#define METHOD_IMPL_ATTRIBUTE_MANAGED 0x0000 + +#define METHOD_IMPL_ATTRIBUTE_FORWARD_REF 0x0010 +#define METHOD_IMPL_ATTRIBUTE_PRESERVE_SIG 0x0080 +#define METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL 0x1000 
+#define METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED 0x0020 +#define METHOD_IMPL_ATTRIBUTE_NOINLINING 0x0008 +#define METHOD_IMPL_ATTRIBUTE_MAX_METHOD_IMPL_VAL 0xffff + +#define METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK 0x0007 +#define METHOD_ATTRIBUTE_COMPILER_CONTROLLED 0x0000 +#define METHOD_ATTRIBUTE_PRIVATE 0x0001 +#define METHOD_ATTRIBUTE_FAM_AND_ASSEM 0x0002 +#define METHOD_ATTRIBUTE_ASSEM 0x0003 +#define METHOD_ATTRIBUTE_FAMILY 0x0004 +#define METHOD_ATTRIBUTE_FAM_OR_ASSEM 0x0005 +#define METHOD_ATTRIBUTE_PUBLIC 0x0006 + +#define METHOD_ATTRIBUTE_STATIC 0x0010 +#define METHOD_ATTRIBUTE_FINAL 0x0020 +#define METHOD_ATTRIBUTE_VIRTUAL 0x0040 +#define METHOD_ATTRIBUTE_HIDE_BY_SIG 0x0080 + +#define METHOD_ATTRIBUTE_VTABLE_LAYOUT_MASK 0x0100 +#define METHOD_ATTRIBUTE_REUSE_SLOT 0x0000 +#define METHOD_ATTRIBUTE_NEW_SLOT 0x0100 + +#define METHOD_ATTRIBUTE_STRICT 0x0200 +#define METHOD_ATTRIBUTE_ABSTRACT 0x0400 +#define METHOD_ATTRIBUTE_SPECIAL_NAME 0x0800 + +#define METHOD_ATTRIBUTE_PINVOKE_IMPL 0x2000 +#define METHOD_ATTRIBUTE_UNMANAGED_EXPORT 0x0008 + +/* + * For runtime use only + */ +#define METHOD_ATTRIBUTE_RESERVED_MASK 0xd000 +#define METHOD_ATTRIBUTE_RT_SPECIAL_NAME 0x1000 +#define METHOD_ATTRIBUTE_HAS_SECURITY 0x4000 +#define METHOD_ATTRIBUTE_REQUIRE_SEC_OBJECT 0x8000 + +/* +* Type Attributes (21.1.13). 
+*/ +#define TYPE_ATTRIBUTE_VISIBILITY_MASK 0x00000007 +#define TYPE_ATTRIBUTE_NOT_PUBLIC 0x00000000 +#define TYPE_ATTRIBUTE_PUBLIC 0x00000001 +#define TYPE_ATTRIBUTE_NESTED_PUBLIC 0x00000002 +#define TYPE_ATTRIBUTE_NESTED_PRIVATE 0x00000003 +#define TYPE_ATTRIBUTE_NESTED_FAMILY 0x00000004 +#define TYPE_ATTRIBUTE_NESTED_ASSEMBLY 0x00000005 +#define TYPE_ATTRIBUTE_NESTED_FAM_AND_ASSEM 0x00000006 +#define TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM 0x00000007 + +#define TYPE_ATTRIBUTE_LAYOUT_MASK 0x00000018 +#define TYPE_ATTRIBUTE_AUTO_LAYOUT 0x00000000 +#define TYPE_ATTRIBUTE_SEQUENTIAL_LAYOUT 0x00000008 +#define TYPE_ATTRIBUTE_EXPLICIT_LAYOUT 0x00000010 + +#define TYPE_ATTRIBUTE_CLASS_SEMANTIC_MASK 0x00000020 +#define TYPE_ATTRIBUTE_CLASS 0x00000000 +#define TYPE_ATTRIBUTE_INTERFACE 0x00000020 + +#define TYPE_ATTRIBUTE_ABSTRACT 0x00000080 +#define TYPE_ATTRIBUTE_SEALED 0x00000100 +#define TYPE_ATTRIBUTE_SPECIAL_NAME 0x00000400 + +#define TYPE_ATTRIBUTE_IMPORT 0x00001000 +#define TYPE_ATTRIBUTE_SERIALIZABLE 0x00002000 + +#define TYPE_ATTRIBUTE_STRING_FORMAT_MASK 0x00030000 +#define TYPE_ATTRIBUTE_ANSI_CLASS 0x00000000 +#define TYPE_ATTRIBUTE_UNICODE_CLASS 0x00010000 +#define TYPE_ATTRIBUTE_AUTO_CLASS 0x00020000 + +#define TYPE_ATTRIBUTE_BEFORE_FIELD_INIT 0x00100000 +#define TYPE_ATTRIBUTE_FORWARDER 0x00200000 + +#define TYPE_ATTRIBUTE_RESERVED_MASK 0x00040800 +#define TYPE_ATTRIBUTE_RT_SPECIAL_NAME 0x00000800 +#define TYPE_ATTRIBUTE_HAS_SECURITY 0x00040000 + +/* +* Flags for Params (22.1.12) +*/ +#define PARAM_ATTRIBUTE_IN 0x0001 +#define PARAM_ATTRIBUTE_OUT 0x0002 +#define PARAM_ATTRIBUTE_OPTIONAL 0x0010 +#define PARAM_ATTRIBUTE_RESERVED_MASK 0xf000 +#define PARAM_ATTRIBUTE_HAS_DEFAULT 0x1000 +#define PARAM_ATTRIBUTE_HAS_FIELD_MARSHAL 0x2000 +#define PARAM_ATTRIBUTE_UNUSED 0xcfe0 + +// Flags for Generic Parameters (II.23.1.7) +#define IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_NON_VARIANT 0x00 +#define IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_COVARIANT 0x01 +#define 
IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_CONTRAVARIANT 0x02 +#define IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_VARIANCE_MASK 0x03 +#define IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_REFERENCE_TYPE_CONSTRAINT 0x04 +#define IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_NOT_NULLABLE_VALUE_TYPE_CONSTRAINT 0x08 +#define IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_DEFAULT_CONSTRUCTOR_CONSTRAINT 0x10 +#define IL2CPP_GENERIC_PARAMETER_ATTRIBUTE_SPECIAL_CONSTRAINT_MASK 0x1C + +/** + * 21.5 AssemblyRefs + */ +#define ASSEMBLYREF_FULL_PUBLIC_KEY_FLAG 0x00000001 +#define ASSEMBLYREF_RETARGETABLE_FLAG 0x00000100 +#define ASSEMBLYREF_ENABLEJITCOMPILE_TRACKING_FLAG 0x00008000 +#define ASSEMBLYREF_DISABLEJITCOMPILE_OPTIMIZER_FLAG 0x00004000 diff --git a/module/src/main/cpp/il2cpp.cpp b/module/src/main/cpp/il2cpp.cpp new file mode 100644 index 00000000..894f1a7f --- /dev/null +++ b/module/src/main/cpp/il2cpp.cpp @@ -0,0 +1,446 @@ +// +// Created by Perfare on 2020/7/4. +// + +#include "il2cpp.h" +#include <dlfcn.h> +#include <cstdio> +#include <cstdlib> +#include <cstring> +#include <cinttypes> +#include <fstream> +#include <sstream> +#include <vector> +#include "log.h" +#include "il2cpp-tabledefs.h" +#include IL2CPPCLASS + +#define DO_API(r, n, p) r (*n) p + +#include IL2CPPAPI + +#undef DO_API + +static void *il2cpp_handle = nullptr; +static uint64_t il2cpp_baseaddr = 0; + +void init_il2cpp_api() { +#define DO_API(r, n, p) n = (r (*) p)dlsym(il2cpp_handle, #n) + +#include IL2CPPAPI + +#undef DO_API +} + +//TODO 强化搜索,针对多个data字段 +void process_maps(uint32_t typeDefinitionsCount, uint64_t *il2cpp_addr, + uint64_t *metadataregistration_addr) { + char line[1024]; + + bool flag = false; + uint64_t start = 0; + uint64_t end = 0; + char flags[5]; + char path[PATH_MAX]; + + uint64_t data_start = 0; + uint64_t data_end = 0; + + uint64_t bss_start = 0; + uint64_t bss_end = 0; + + FILE *fp = fopen("/proc/self/maps", "r"); + if (fp != nullptr) { + while (fgets(line, sizeof(line), fp)) { + if (!flag && strstr(line, "libil2cpp.so")) { + flag = true; + char *pch = strtok(line, "-"); + *il2cpp_addr = strtoull(pch, 
nullptr, 16); + } + if (flag) { + sscanf(line, "%" PRIx64"-%" PRIx64" %s %*" PRIx64" %*x:%*x %*u %s\n", + &start, &end, flags, path); + if (strcmp(flags, "rw-p") == 0 && strstr(path, "libil2cpp.so")) { + LOGD("data start %" PRIx64"", start); + LOGD("data end %" PRIx64"", end); + data_start = start; + data_end = end; + } + if (strcmp(path, "[anon:.bss]") == 0) { + LOGD("bss start %" PRIx64"", start); + LOGD("bss end %" PRIx64"", end); + bss_start = start; + bss_end = end; + break; + } + } + } + fclose(fp); + } + + auto search_addr = data_start; + while (search_addr < data_end) { +#ifdef __LP64__ + search_addr += 8; +#else + search_addr += 4; +#endif + auto metadataRegistration = (Il2CppMetadataRegistration *) search_addr; + if (metadataRegistration && + metadataRegistration->typeDefinitionsSizesCount == typeDefinitionsCount) { + //LOGD("now: %" PRIx64"", search_addr - *il2cpp_addr); + auto metadataUsages_addr = (uint64_t) metadataRegistration->metadataUsages; + //LOGD("now2: %" PRIx64"", metadataUsages_addr); + if (metadataUsages_addr >= data_start && metadataUsages_addr <= data_end) { + flag = true; + for (int i = 0; i < 5000; ++i) { + auto pointer_addr = (uint64_t) metadataRegistration->metadataUsages[i]; + //LOGD("now3: %" PRIx64"", pointer_addr); + if ((pointer_addr < bss_start || pointer_addr > bss_end) && + (pointer_addr < data_start || pointer_addr > data_end)) { + flag = false; + break; + } + } + if (flag) { + LOGD("metadataregistration_rva: %" PRIx64"", search_addr - *il2cpp_addr); + *metadataregistration_addr = search_addr; + break; + } + } + } + } +} + +std::string get_method_modifier(uint16_t flags) { + std::stringstream outPut; + auto access = flags & METHOD_ATTRIBUTE_MEMBER_ACCESS_MASK; + switch (access) { + case METHOD_ATTRIBUTE_PRIVATE: + outPut << "private "; + break; + case METHOD_ATTRIBUTE_PUBLIC: + outPut << "public "; + break; + case METHOD_ATTRIBUTE_FAMILY: + outPut << "protected "; + break; + case METHOD_ATTRIBUTE_ASSEM: + case 
METHOD_ATTRIBUTE_FAM_AND_ASSEM: + outPut << "internal "; + break; + case METHOD_ATTRIBUTE_FAM_OR_ASSEM: + outPut << "protected internal "; + break; + } + if (flags & METHOD_ATTRIBUTE_STATIC) { + outPut << "static "; + } + if (flags & METHOD_ATTRIBUTE_ABSTRACT) { + outPut << "abstract "; + if ((flags & METHOD_ATTRIBUTE_VTABLE_LAYOUT_MASK) == METHOD_ATTRIBUTE_REUSE_SLOT) { + outPut << "override "; + } + } else if (flags & METHOD_ATTRIBUTE_FINAL) { + if ((flags & METHOD_ATTRIBUTE_VTABLE_LAYOUT_MASK) == METHOD_ATTRIBUTE_REUSE_SLOT) { + outPut << "sealed override "; + } + } else if (flags & METHOD_ATTRIBUTE_VIRTUAL) { + if ((flags & METHOD_ATTRIBUTE_VTABLE_LAYOUT_MASK) == METHOD_ATTRIBUTE_NEW_SLOT) { + outPut << "virtual "; + } else { + outPut << "override "; + } + } + if (flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) { + outPut << "extern "; + } + return outPut.str(); +} + +std::string dump_method(Il2CppClass *klass) { + std::stringstream outPut; + if (klass->method_count > 0) { + outPut << "\n\t// Methods\n"; + void *iter = nullptr; + while (auto method = il2cpp_class_get_methods(klass, &iter)) { + //TODO attribute + if (method->methodPointer) { + outPut << "\t// RVA: 0x"; + outPut << std::hex << (uint64_t) method->methodPointer - il2cpp_baseaddr; + outPut << " VA: 0x"; + outPut << std::hex << (uint64_t) method->methodPointer; + } else { + outPut << "\t// RVA: 0x VA: 0x0"; + } + if (method->slot != 65535) { + outPut << std::dec << " Slot: " << method->slot; + } + outPut << "\n\t"; + outPut << get_method_modifier(method->flags); + //TODO genericContainerIndex + auto return_type = method->return_type; + if (return_type->byref) { + outPut << "ref "; + } + auto return_class = il2cpp_class_from_type(return_type); + outPut << return_class->name << " " << method->name << "("; + for (int i = 0; i < method->parameters_count; ++i) { + auto parameters = method->parameters[i]; + auto parameter_type = parameters.parameter_type; + auto attrs = parameter_type->attrs; + if 
(parameter_type->byref) { + if (attrs & PARAM_ATTRIBUTE_OUT && !(attrs & PARAM_ATTRIBUTE_IN)) { + outPut << "out "; + } else if (attrs & PARAM_ATTRIBUTE_IN && !(attrs & PARAM_ATTRIBUTE_OUT)) { + outPut << "in "; + } else { + outPut << "ref "; + } + } else { + if (attrs & PARAM_ATTRIBUTE_IN) { + outPut << "[In] "; + } + if (attrs & PARAM_ATTRIBUTE_OUT) { + outPut << "[Out] "; + } + } + auto parameter_class = il2cpp_class_from_type(parameter_type); + outPut << parameter_class->name << " " << parameters.name; + //TODO DefaultValue + outPut << ", "; + } + if (method->parameters_count > 0) { + outPut.seekp(-2, outPut.cur); + } + outPut << ") { }\n"; + //TODO GenericInstMethod + } + } + return outPut.str(); +} + +std::string dump_property(Il2CppClass *klass) { + std::stringstream outPut; + if (klass->property_count > 0) { + outPut << "\n\t// Properties\n"; + void *iter = nullptr; + while (auto prop = il2cpp_class_get_properties(klass, &iter)) { + //TODO attribute + outPut << "\t"; + Il2CppClass *prop_class = nullptr; + if (prop->get) { + outPut << get_method_modifier(prop->get->flags); + prop_class = il2cpp_class_from_type(prop->get->return_type); + } else if (prop->set) { + outPut << get_method_modifier(prop->set->flags); + prop_class = il2cpp_class_from_type(prop->set->parameters[0].parameter_type); + } + outPut << prop_class->name << " " << prop->name << " { "; + if (prop->get) { + outPut << "get; "; + } + if (prop->set) { + outPut << "set; "; + } + outPut << "}\n"; + } + } + return outPut.str(); +} + +std::string dump_field(Il2CppClass *klass) { + std::stringstream outPut; + if (klass->field_count > 0) { + outPut << "\n\t// Fields\n"; + void *iter = nullptr; + while (auto field = il2cpp_class_get_fields(klass, &iter)) { + //TODO attribute + outPut << "\t"; + auto attrs = field->type->attrs; + auto access = attrs & FIELD_ATTRIBUTE_FIELD_ACCESS_MASK; + switch (access) { + case FIELD_ATTRIBUTE_PRIVATE: + outPut << "private "; + break; + case FIELD_ATTRIBUTE_PUBLIC: + 
outPut << "public "; + break; + case FIELD_ATTRIBUTE_FAMILY: + outPut << "protected "; + break; + case FIELD_ATTRIBUTE_ASSEMBLY: + case FIELD_ATTRIBUTE_FAM_AND_ASSEM: + outPut << "internal "; + break; + case FIELD_ATTRIBUTE_FAM_OR_ASSEM: + outPut << "protected internal "; + break; + } + if (attrs & FIELD_ATTRIBUTE_LITERAL) { + outPut << "const "; + } else { + if (attrs & FIELD_ATTRIBUTE_STATIC) { + outPut << "static "; + } + if (attrs & FIELD_ATTRIBUTE_INIT_ONLY) { + outPut << "readonly "; + } + } + auto field_class = il2cpp_class_from_type(field->type); + outPut << field_class->name << " " << field->name; + //TODO DefaultValue + outPut << "; // 0x" << std::hex << field->offset << "\n"; + } + } + return outPut.str(); +} + +std::string dump_type(const Il2CppType *type) { + std::stringstream outPut; + auto *klass = il2cpp_class_from_type(type); + outPut << "\n// Namespace: " << klass->namespaze << "\n"; + auto flags = klass->flags; + if (flags & TYPE_ATTRIBUTE_SERIALIZABLE) { + outPut << "[Serializable]\n"; + } + //TODO attribute + auto visibility = flags & TYPE_ATTRIBUTE_VISIBILITY_MASK; + switch (visibility) { + case TYPE_ATTRIBUTE_PUBLIC: + case TYPE_ATTRIBUTE_NESTED_PUBLIC: + outPut << "public "; + break; + case TYPE_ATTRIBUTE_NOT_PUBLIC: + case TYPE_ATTRIBUTE_NESTED_FAM_AND_ASSEM: + case TYPE_ATTRIBUTE_NESTED_ASSEMBLY: + outPut << "internal "; + break; + case TYPE_ATTRIBUTE_NESTED_PRIVATE: + outPut << "private "; + break; + case TYPE_ATTRIBUTE_NESTED_FAMILY: + outPut << "protected "; + break; + case TYPE_ATTRIBUTE_NESTED_FAM_OR_ASSEM: + outPut << "protected internal "; + break; + } + if (flags & TYPE_ATTRIBUTE_ABSTRACT && flags & TYPE_ATTRIBUTE_SEALED) { + outPut << "static "; + } else if (!(flags & TYPE_ATTRIBUTE_INTERFACE) && flags & TYPE_ATTRIBUTE_ABSTRACT) { + outPut << "abstract "; + } else if (!klass->valuetype && !klass->enumtype && flags & TYPE_ATTRIBUTE_SEALED) { + outPut << "sealed "; + } + if (flags & TYPE_ATTRIBUTE_INTERFACE) { + outPut << "interface 
"; + } else if (klass->enumtype) { + outPut << "enum "; + } else if (klass->valuetype) { + outPut << "struct "; + } else { + outPut << "class "; + } + outPut << klass->name; //TODO genericContainerIndex + std::vector<std::string> extends; + if (!klass->valuetype && !klass->enumtype && klass->parent) { + auto parent_type = il2cpp_class_get_type(klass->parent); + if (parent_type->type != IL2CPP_TYPE_OBJECT) { + extends.emplace_back(klass->parent->name); + } + } + if (klass->interfaces_count > 0) { + void *iter = NULL; + while (auto itf = il2cpp_class_get_interfaces(klass, &iter)) { + extends.emplace_back(itf->name); + } + } + if (!extends.empty()) { + outPut << " : " << extends[0]; + for (int i = 1; i < extends.size(); ++i) { + outPut << ", " << extends[i]; + } + } + outPut << " // TypeDefIndex: " << type->data.klassIndex << "\n{"; + outPut << dump_field(klass); + outPut << dump_property(klass); + outPut << dump_method(klass); + //TODO EventInfo + outPut << "}\n"; + return outPut.str(); +} + +void il2cpp_dump(void *handle, char *outDir) { + LOGI("il2cpp_handle: %p", handle); + il2cpp_handle = handle; + init_il2cpp_api(); + auto domain = il2cpp_domain_get(); + size_t size; + auto assemblies = il2cpp_domain_get_assemblies(domain, &size); + uint32_t typeDefinitionsCount = 0; + std::stringstream imageOutput; + for (int i = 0; i < size; ++i) { + auto image = il2cpp_assembly_get_image(assemblies[i]); + typeDefinitionsCount += image->typeCount; + imageOutput << "// Image " << i << ": " << image->name << " - " << image->typeStart << "\n"; + } + std::vector<std::string> outPuts(typeDefinitionsCount); + LOGI("typeDefinitionsCount: %i", typeDefinitionsCount); + //TODO 2018.3.0f2(24.1)及以上版本可以使用il2cpp_image_get_class而不需要获取metadataregistration地址 + uint64_t metadataregistration_addr = 0; + process_maps(typeDefinitionsCount, &il2cpp_baseaddr, &metadataregistration_addr); + LOGI("il2cpp_addr: %" PRIx64"", il2cpp_baseaddr); + LOGI("metadataregistration_addr: %" PRIx64"", metadataregistration_addr); + if 
(metadataregistration_addr > 0) { + auto *metadataRegistration = (Il2CppMetadataRegistration *) metadataregistration_addr; + for (int i = 0; i < metadataRegistration->typesCount; ++i) { + auto type = metadataRegistration->types[i]; + switch (type->type) { + case IL2CPP_TYPE_VOID: + case IL2CPP_TYPE_BOOLEAN: + case IL2CPP_TYPE_CHAR: + case IL2CPP_TYPE_I1: + case IL2CPP_TYPE_U1: + case IL2CPP_TYPE_I2: + case IL2CPP_TYPE_U2: + case IL2CPP_TYPE_I4: + case IL2CPP_TYPE_U4: + case IL2CPP_TYPE_I8: + case IL2CPP_TYPE_U8: + case IL2CPP_TYPE_R4: + case IL2CPP_TYPE_R8: + case IL2CPP_TYPE_STRING: + case IL2CPP_TYPE_VALUETYPE: + case IL2CPP_TYPE_CLASS: + case IL2CPP_TYPE_TYPEDBYREF: + case IL2CPP_TYPE_I: + case IL2CPP_TYPE_U: + case IL2CPP_TYPE_OBJECT: + case IL2CPP_TYPE_ENUM: { + //LOGD("type name : %s", il2cpp_type_get_name(type)); + auto klassIndex = type->data.klassIndex; + if (outPuts[klassIndex].empty()) { + outPuts[klassIndex] = dump_type(type); + } + break; + } + default: + break; + } + } + LOGI("write dump file"); + auto outPath = std::string(outDir).append("/files/dump.cs"); + std::ofstream outStream(outPath); + outStream << imageOutput.str(); + for (int i = 0; i < typeDefinitionsCount; ++i) { + if (!outPuts[i].empty()) { + outStream << outPuts[i]; + } else { + LOGW("miss typeDefinition: %d", i); + } + } + outStream.close(); + LOGI("dump done!"); + } +} \ No newline at end of file diff --git a/module/src/main/cpp/il2cpp.h b/module/src/main/cpp/il2cpp.h new file mode 100644 index 00000000..fcb75136 --- /dev/null +++ b/module/src/main/cpp/il2cpp.h @@ -0,0 +1,20 @@ +// +// Created by Perfare on 2020/7/4. 
+// + +#ifndef RIRU_IL2CPPDUMPER_IL2CPP_H +#define RIRU_IL2CPPDUMPER_IL2CPP_H + +#include "game.h" + +#define STR(x) #x +#define STRINGIFY_MACRO(x) STR(x) +#define EXPAND(x) x +#define IL2CPPHEADER(a, b, c) STRINGIFY_MACRO(EXPAND(a)EXPAND(b)EXPAND(c)) +#define IL2CPPAPIDIR il2cppapi/ +#define IL2CPPCLASS IL2CPPHEADER(IL2CPPAPIDIR, UnityVersion, /il2cpp-class.h) +#define IL2CPPAPI IL2CPPHEADER(IL2CPPAPIDIR, UnityVersion, /il2cpp-api-functions.h) + +void il2cpp_dump(void *handle, char *outDir); + +#endif //RIRU_IL2CPPDUMPER_IL2CPP_H diff --git a/module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-api-functions.h new file mode 100644 index 00000000..31a8c402 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-api-functions.h @@ -0,0 +1,263 @@ +DO_API(void, il2cpp_init, (const char* domain_name)); +DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass 
* element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, 
il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, 
il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, 
void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const 
Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, 
(Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(char*, il2cpp_thread_get_name, (Il2CppThread * thread, uint32_t * len)); +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, 
(size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API(const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass * klass)); 
+DO_API(const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo * info)); +DO_API(const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument * document)); +DO_API(const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument * document)); +DO_API(const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo * method)); +DO_API(const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo * info)); +DO_API(const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo * info)); +DO_API(size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset)); +DO_API(const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo * info)); +DO_API(const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo * info)); +DO_API(const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, int64_t uid, int32_t offset)); +DO_API(void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location, void *data)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location)); +#endif diff --git 
a/module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-class.h new file mode 100644 index 00000000..ae7275ae --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2017.1.0f3/il2cpp-class.h @@ -0,0 +1,1027 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + 
IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) 
(Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef 
int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType(EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex(EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex 
methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; 
+} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; 
+typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t 
fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + 
uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup 
Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass 
*typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t 
offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void 
(*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t 
minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ 
+ int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t 
remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-api-functions.h new file mode 100644 index 00000000..31a8c402 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-api-functions.h @@ -0,0 +1,263 @@ +DO_API(void, il2cpp_init, (const char* domain_name)); +DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, 
il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, 
(Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, 
()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(int64_t, 
il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * 
method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + 
+// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(char*, il2cpp_thread_get_name, (Il2CppThread * thread, uint32_t * len)); +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo & frame)); +DO_API(bool, 
il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API(const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass * klass)); +DO_API(const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo * info)); +DO_API(const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument * document)); +DO_API(const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument * document)); +DO_API(const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo * method)); +DO_API(const 
Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo * info)); +DO_API(const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo * info)); +DO_API(size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset)); +DO_API(const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo * info)); +DO_API(const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo * info)); +DO_API(const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, int64_t uid, int32_t offset)); +DO_API(void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location, void *data)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location)); +#endif diff --git a/module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-class.h new file mode 100644 index 00000000..e898f9d8 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2017.1.3f1/il2cpp-class.h @@ -0,0 +1,1028 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType 
Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD 
+} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef 
void (*Il2CppLogCallback)(const char*); /* completes the 'typedef' begun on the previous source line */
typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot;
typedef void (*Il2CppMethodPointer)();
typedef int32_t il2cpp_array_size_t;
/* Type codes for Il2CppType.type; the values mirror the ECMA-335
   ELEMENT_TYPE_* constants used in .NET metadata signatures. */
typedef enum Il2CppTypeEnum
{
    IL2CPP_TYPE_END = 0x00,
    IL2CPP_TYPE_VOID = 0x01,
    IL2CPP_TYPE_BOOLEAN = 0x02,
    IL2CPP_TYPE_CHAR = 0x03,
    IL2CPP_TYPE_I1 = 0x04,
    IL2CPP_TYPE_U1 = 0x05,
    IL2CPP_TYPE_I2 = 0x06,
    IL2CPP_TYPE_U2 = 0x07,
    IL2CPP_TYPE_I4 = 0x08,
    IL2CPP_TYPE_U4 = 0x09,
    IL2CPP_TYPE_I8 = 0x0a,
    IL2CPP_TYPE_U8 = 0x0b,
    IL2CPP_TYPE_R4 = 0x0c,
    IL2CPP_TYPE_R8 = 0x0d,
    IL2CPP_TYPE_STRING = 0x0e,
    IL2CPP_TYPE_PTR = 0x0f,
    IL2CPP_TYPE_BYREF = 0x10,
    IL2CPP_TYPE_VALUETYPE = 0x11,
    IL2CPP_TYPE_CLASS = 0x12,
    IL2CPP_TYPE_VAR = 0x13,
    IL2CPP_TYPE_ARRAY = 0x14,
    IL2CPP_TYPE_GENERICINST = 0x15,
    IL2CPP_TYPE_TYPEDBYREF = 0x16,
    IL2CPP_TYPE_I = 0x18,
    IL2CPP_TYPE_U = 0x19,
    IL2CPP_TYPE_FNPTR = 0x1b,
    IL2CPP_TYPE_OBJECT = 0x1c,
    IL2CPP_TYPE_SZARRAY = 0x1d,
    IL2CPP_TYPE_MVAR = 0x1e,
    IL2CPP_TYPE_CMOD_REQD = 0x1f,
    IL2CPP_TYPE_CMOD_OPT = 0x20,
    IL2CPP_TYPE_INTERNAL = 0x21,
    IL2CPP_TYPE_MODIFIER = 0x40,
    IL2CPP_TYPE_SENTINEL = 0x41,
    IL2CPP_TYPE_PINNED = 0x45,
    IL2CPP_TYPE_ENUM = 0x55
} Il2CppTypeEnum;
/* Typed row indices into the global-metadata tables (see Il2CppGlobalMetadataHeader). */
typedef int32_t TypeIndex;
typedef int32_t TypeDefinitionIndex;
typedef int32_t FieldIndex;
typedef int32_t DefaultValueIndex;
typedef int32_t DefaultValueDataIndex;
typedef int32_t CustomAttributeIndex;
typedef int32_t ParameterIndex;
typedef int32_t MethodIndex;
typedef int32_t GenericMethodIndex;
typedef int32_t PropertyIndex;
typedef int32_t EventIndex;
typedef int32_t GenericContainerIndex;
typedef int32_t GenericParameterIndex;
typedef int16_t GenericParameterConstraintIndex; /* 16-bit, unlike every other index type */
typedef int32_t NestedTypeIndex;
typedef int32_t InterfacesIndex;
typedef int32_t VTableIndex;
typedef int32_t InterfaceOffsetIndex;
typedef int32_t RGCTXIndex;
typedef int32_t StringIndex;
typedef int32_t StringLiteralIndex;
typedef int32_t /* declaration continues on the next source line */
GenericInstIndex; /* completes 'typedef int32_t' begun on the previous source line */
typedef int32_t ImageIndex;
typedef int32_t AssemblyIndex;
typedef int32_t InteropDataIndex;
/* Sentinel value (-1) meaning "no entry" for each index kind. */
const TypeIndex kTypeIndexInvalid = -1;
const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1;
const DefaultValueDataIndex kDefaultValueIndexNull = -1;
const EventIndex kEventIndexInvalid = -1;
const FieldIndex kFieldIndexInvalid = -1;
const MethodIndex kMethodIndexInvalid = -1;
const PropertyIndex kPropertyIndexInvalid = -1;
const GenericContainerIndex kGenericContainerIndexInvalid = -1;
const GenericParameterIndex kGenericParameterIndexInvalid = -1;
const RGCTXIndex kRGCTXIndexInvalid = -1;
const StringLiteralIndex kStringLiteralIndexInvalid = -1;
const InteropDataIndex kInteropDataIndexInvalid = -1;
/* Encoded metadata reference: usage kind in the top 3 bits,
   table index in the low 29 bits (see the two helpers below). */
typedef uint32_t EncodedMethodIndex;
typedef enum Il2CppMetadataUsage
{
    kIl2CppMetadataUsageInvalid,
    kIl2CppMetadataUsageTypeInfo,
    kIl2CppMetadataUsageIl2CppType,
    kIl2CppMetadataUsageMethodDef,
    kIl2CppMetadataUsageFieldInfo,
    kIl2CppMetadataUsageStringLiteral,
    kIl2CppMetadataUsageMethodRef,
} Il2CppMetadataUsage;
/* Extract the usage kind from the top 3 bits of an encoded index. */
static inline Il2CppMetadataUsage GetEncodedIndexType(EncodedMethodIndex index)
{
    return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29);
}
/* Strip the usage kind, leaving the 29-bit table index. */
static inline uint32_t GetDecodedMethodIndex(EncodedMethodIndex index)
{
    return index & 0x1FFFFFFFU;
}
typedef struct Il2CppImage Il2CppImage;
typedef struct Il2CppType Il2CppType;
typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata;
/* RGCTX entry payload; which member is meaningful depends on
   Il2CppRGCTXDataType (presumably RGCTX = runtime generic context, as in Mono). */
typedef union Il2CppRGCTXDefinitionData
{
    int32_t rgctxDataDummy;
    MethodIndex methodIndex;
    TypeIndex typeIndex;
} Il2CppRGCTXDefinitionData;
typedef enum Il2CppRGCTXDataType
{
    IL2CPP_RGCTX_DATA_INVALID,
    IL2CPP_RGCTX_DATA_TYPE,
    IL2CPP_RGCTX_DATA_CLASS,
    IL2CPP_RGCTX_DATA_METHOD,
    IL2CPP_RGCTX_DATA_ARRAY,
} Il2CppRGCTXDataType;
typedef struct Il2CppRGCTXDefinition
{
    Il2CppRGCTXDataType type;
    Il2CppRGCTXDefinitionData data;
} Il2CppRGCTXDefinition;
typedef struct /* declaration continues on the next source line */
Il2CppInterfaceOffsetPair /* completes the 'typedef struct' begun on the previous source line */
{
    TypeIndex interfaceTypeIndex;
    int32_t offset;
} Il2CppInterfaceOffsetPair;
/* One row of the global-metadata type-definition table.
   Field order and widths define the serialized layout -- do not reorder. */
typedef struct Il2CppTypeDefinition
{
    StringIndex nameIndex;
    StringIndex namespaceIndex;
    CustomAttributeIndex customAttributeIndex;
    TypeIndex byvalTypeIndex;
    TypeIndex byrefTypeIndex;
    TypeIndex declaringTypeIndex;
    TypeIndex parentIndex;
    TypeIndex elementTypeIndex;
    RGCTXIndex rgctxStartIndex;
    int32_t rgctxCount;
    GenericContainerIndex genericContainerIndex;
    uint32_t flags;
    /* start indices + counts into the field/method/event/property/
       nested-type/interface/vtable tables that belong to this type */
    FieldIndex fieldStart;
    MethodIndex methodStart;
    EventIndex eventStart;
    PropertyIndex propertyStart;
    NestedTypeIndex nestedTypesStart;
    InterfacesIndex interfacesStart;
    VTableIndex vtableStart;
    InterfacesIndex interfaceOffsetsStart;
    uint16_t method_count;
    uint16_t property_count;
    uint16_t field_count;
    uint16_t event_count;
    uint16_t nested_type_count;
    uint16_t vtable_count;
    uint16_t interfaces_count;
    uint16_t interface_offsets_count;
    uint32_t bitfield;
    uint32_t token;
} Il2CppTypeDefinition;
typedef struct Il2CppFieldDefinition
{
    StringIndex nameIndex;
    TypeIndex typeIndex;
    CustomAttributeIndex customAttributeIndex;
    uint32_t token;
} Il2CppFieldDefinition;
typedef struct Il2CppFieldDefaultValue
{
    FieldIndex fieldIndex;
    TypeIndex typeIndex;
    DefaultValueDataIndex dataIndex;
} Il2CppFieldDefaultValue;
typedef struct Il2CppFieldMarshaledSize
{
    FieldIndex fieldIndex;
    TypeIndex typeIndex;
    int32_t size;
} Il2CppFieldMarshaledSize;
typedef struct Il2CppFieldRef
{
    TypeIndex typeIndex;
    FieldIndex fieldIndex;
} Il2CppFieldRef;
typedef struct Il2CppParameterDefinition
{
    StringIndex nameIndex;
    uint32_t token;
    CustomAttributeIndex customAttributeIndex;
    TypeIndex typeIndex;
} Il2CppParameterDefinition;
typedef struct Il2CppParameterDefaultValue
{
    ParameterIndex parameterIndex;
    TypeIndex typeIndex;
    DefaultValueDataIndex dataIndex;
} Il2CppParameterDefaultValue;
typedef struct /* declaration continues on the next source line */
Il2CppMethodDefinition /* completes the 'typedef struct' begun on the previous source line */
{
    StringIndex nameIndex;
    TypeDefinitionIndex declaringType;
    TypeIndex returnType;
    ParameterIndex parameterStart;
    CustomAttributeIndex customAttributeIndex;
    GenericContainerIndex genericContainerIndex;
    MethodIndex methodIndex;
    MethodIndex invokerIndex;
    MethodIndex reversePInvokeWrapperIndex;
    RGCTXIndex rgctxStartIndex;
    int32_t rgctxCount;
    uint32_t token;
    uint16_t flags;
    uint16_t iflags;
    uint16_t slot;
    uint16_t parameterCount;
} Il2CppMethodDefinition;
typedef struct Il2CppEventDefinition
{
    StringIndex nameIndex;
    TypeIndex typeIndex;
    MethodIndex add;
    MethodIndex remove;
    MethodIndex raise;
    CustomAttributeIndex customAttributeIndex;
    uint32_t token;
} Il2CppEventDefinition;
typedef struct Il2CppPropertyDefinition
{
    StringIndex nameIndex;
    MethodIndex get;
    MethodIndex set;
    uint32_t attrs;
    CustomAttributeIndex customAttributeIndex;
    uint32_t token;
} Il2CppPropertyDefinition;
/* Instantiation of a generic method: generic definition plus the
   generic-inst rows holding the class and method type arguments. */
typedef struct Il2CppMethodSpec
{
    MethodIndex methodDefinitionIndex;
    GenericInstIndex classIndexIndex;
    GenericInstIndex methodIndexIndex;
} Il2CppMethodSpec;
typedef struct Il2CppStringLiteral
{
    uint32_t length;
    StringLiteralIndex dataIndex;
} Il2CppStringLiteral;
typedef struct Il2CppGenericMethodIndices
{
    MethodIndex methodIndex;
    MethodIndex invokerIndex;
} Il2CppGenericMethodIndices;
typedef struct Il2CppGenericMethodFunctionsDefinitions
{
    GenericMethodIndex genericMethodIndex;
    Il2CppGenericMethodIndices indices;
} Il2CppGenericMethodFunctionsDefinitions;
const int kPublicKeyByteLength = 8;
typedef struct Il2CppAssemblyName
{
    StringIndex nameIndex;
    StringIndex cultureIndex;
    StringIndex hashValueIndex;
    StringIndex publicKeyIndex;
    uint32_t hash_alg;
    int32_t hash_len;
    uint32_t flags;
    int32_t major;
    int32_t minor;
    int32_t build;
    int32_t revision;
    uint8_t publicKeyToken[8];
} Il2CppAssemblyName;
typedef struct Il2CppImageDefinition
{
    StringIndex nameIndex;
    AssemblyIndex assemblyIndex;
    TypeDefinitionIndex typeStart;
    uint32_t typeCount;
    TypeDefinitionIndex exportedTypeStart;
    uint32_t exportedTypeCount;
    MethodIndex entryPointIndex;
    uint32_t token;
} Il2CppImageDefinition;
typedef struct Il2CppAssembly
{
    ImageIndex imageIndex;
    CustomAttributeIndex customAttributeIndex;
    int32_t referencedAssemblyStart;
    int32_t referencedAssemblyCount;
    Il2CppAssemblyName aname;
} Il2CppAssembly;
typedef struct Il2CppMetadataUsageList
{
    uint32_t start;
    uint32_t count;
} Il2CppMetadataUsageList;
typedef struct Il2CppMetadataUsagePair
{
    uint32_t destinationIndex;
    uint32_t encodedSourceIndex;
} Il2CppMetadataUsagePair;
typedef struct Il2CppCustomAttributeTypeRange
{
    int32_t start;
    int32_t count;
} Il2CppCustomAttributeTypeRange;
typedef struct Il2CppRange
{
    int32_t start;
    int32_t length;
} Il2CppRange;
typedef struct Il2CppWindowsRuntimeTypeNamePair
{
    StringIndex nameIndex;
    TypeIndex typeIndex;
} Il2CppWindowsRuntimeTypeNamePair;
/* Header of the serialized global-metadata.dat blob: (offset, count/size)
   pairs locating each table. Packed to 4 bytes to match the file format. */
#pragma pack(push, p1,4)
typedef struct Il2CppGlobalMetadataHeader
{
    int32_t sanity;   /* magic value checked on load */
    int32_t version;
    int32_t stringLiteralOffset;
    int32_t stringLiteralCount;
    int32_t stringLiteralDataOffset;
    int32_t stringLiteralDataCount;
    int32_t stringOffset;
    int32_t stringCount;
    int32_t eventsOffset;
    int32_t eventsCount;
    int32_t propertiesOffset;
    int32_t propertiesCount;
    int32_t methodsOffset;
    int32_t methodsCount;
    int32_t parameterDefaultValuesOffset;
    int32_t parameterDefaultValuesCount;
    int32_t fieldDefaultValuesOffset;
    int32_t fieldDefaultValuesCount;
    int32_t fieldAndParameterDefaultValueDataOffset;
    int32_t fieldAndParameterDefaultValueDataCount;
    int32_t fieldMarshaledSizesOffset;
    int32_t fieldMarshaledSizesCount;
    int32_t parametersOffset;
    int32_t parametersCount;
    int32_t fieldsOffset;
    int32_t fieldsCount;
    int32_t genericParametersOffset;
    int32_t genericParametersCount;
    int32_t genericParameterConstraintsOffset;
    int32_t genericParameterConstraintsCount;
    int32_t genericContainersOffset;
    int32_t genericContainersCount;
    int32_t nestedTypesOffset;
    int32_t nestedTypesCount;
    int32_t interfacesOffset;
    int32_t interfacesCount;
    int32_t vtableMethodsOffset;
    int32_t vtableMethodsCount;
    int32_t interfaceOffsetsOffset;
    int32_t interfaceOffsetsCount;
    int32_t typeDefinitionsOffset;
    int32_t typeDefinitionsCount;
    int32_t rgctxEntriesOffset;
    int32_t rgctxEntriesCount;
    int32_t imagesOffset;
    int32_t imagesCount;
    int32_t assembliesOffset;
    int32_t assembliesCount;
    int32_t metadataUsageListsOffset;
    int32_t metadataUsageListsCount;
    int32_t metadataUsagePairsOffset;
    int32_t metadataUsagePairsCount;
    int32_t fieldRefsOffset;
    int32_t fieldRefsCount;
    int32_t referencedAssembliesOffset;
    int32_t referencedAssembliesCount;
    int32_t attributesInfoOffset;
    int32_t attributesInfoCount;
    int32_t attributeTypesOffset;
    int32_t attributeTypesCount;
    int32_t unresolvedVirtualCallParameterTypesOffset;
    int32_t unresolvedVirtualCallParameterTypesCount;
    int32_t unresolvedVirtualCallParameterRangesOffset;
    int32_t unresolvedVirtualCallParameterRangesCount;
    int32_t windowsRuntimeTypeNamesOffset;
    int32_t windowsRuntimeTypeNamesSize;
    int32_t exportedTypeDefinitionsOffset;
    int32_t exportedTypeDefinitionsCount;
} Il2CppGlobalMetadataHeader;
#pragma pack(pop, p1)
typedef struct Il2CppClass Il2CppClass;
typedef struct MethodInfo MethodInfo;
typedef struct Il2CppType Il2CppType;
typedef struct Il2CppArrayType
{
    const Il2CppType* etype;
    uint8_t rank;
    uint8_t numsizes;
    uint8_t numlobounds;
    int *sizes;
    int *lobounds;
} Il2CppArrayType;
/* A concrete list of generic type arguments. */
typedef struct Il2CppGenericInst
{
    uint32_t type_argc;
    const Il2CppType **type_argv;
} Il2CppGenericInst;
typedef struct Il2CppGenericContext
{
    const Il2CppGenericInst *class_inst;
    const Il2CppGenericInst *method_inst;
} Il2CppGenericContext;
typedef /* declaration continues on the next source line */
struct Il2CppGenericParameter /* completes the 'typedef' begun on the previous source line */
{
    GenericContainerIndex ownerIndex;
    StringIndex nameIndex;
    GenericParameterConstraintIndex constraintsStart;
    int16_t constraintsCount;
    uint16_t num;
    uint16_t flags;
} Il2CppGenericParameter;
typedef struct Il2CppGenericContainer
{
    int32_t ownerIndex;
    int32_t type_argc;
    int32_t is_method;
    GenericParameterIndex genericParameterStart;
} Il2CppGenericContainer;
typedef struct Il2CppGenericClass
{
    TypeDefinitionIndex typeDefinitionIndex;
    Il2CppGenericContext context;
    Il2CppClass *cached_class;
} Il2CppGenericClass;
typedef struct Il2CppGenericMethod
{
    const MethodInfo* methodDefinition;
    Il2CppGenericContext context;
} Il2CppGenericMethod;
/* Runtime description of a type reference. */
typedef struct Il2CppType
{
    union
    {
        /* payload; which member is valid presumably depends on 'type' below */
        void* dummy;
        TypeDefinitionIndex klassIndex;
        const Il2CppType *type;
        Il2CppArrayType *array;
        GenericParameterIndex genericParameterIndex;
        Il2CppGenericClass *generic_class;
    } data;
    unsigned int attrs : 16;
    Il2CppTypeEnum type : 8;
    unsigned int num_mods : 6;
    unsigned int byref : 1;
    unsigned int pinned : 1;
} Il2CppType;
typedef enum
{
    IL2CPP_CALL_DEFAULT,
    IL2CPP_CALL_C,
    IL2CPP_CALL_STDCALL,
    IL2CPP_CALL_THISCALL,
    IL2CPP_CALL_FASTCALL,
    IL2CPP_CALL_VARARG
} Il2CppCallConvention;
typedef enum Il2CppCharSet
{
    CHARSET_ANSI,
    CHARSET_UNICODE
} Il2CppCharSet;
typedef struct Il2CppClass Il2CppClass;
typedef struct Il2CppGuid Il2CppGuid;
typedef struct Il2CppImage Il2CppImage;
typedef struct Il2CppAssembly Il2CppAssembly;
typedef struct Il2CppAppDomain Il2CppAppDomain;
typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup;
typedef struct Il2CppDelegate Il2CppDelegate;
typedef struct Il2CppAppContext Il2CppAppContext;
typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable;
/* One vtable slot: the compiled code pointer plus its MethodInfo. */
typedef struct VirtualInvokeData
{
    Il2CppMethodPointer methodPtr;
    const MethodInfo* method;
} VirtualInvokeData;
typedef enum Il2CppTypeNameFormat
{ /* enumerators continue on the next source line */
IL2CPP_TYPE_NAME_FORMAT_IL, /* continues the enum begun on the previous source line */
    IL2CPP_TYPE_NAME_FORMAT_REFLECTION,
    IL2CPP_TYPE_NAME_FORMAT_FULL_NAME,
    IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED
} Il2CppTypeNameFormat;
extern bool g_il2cpp_is_fully_initialized;
/* Cached Il2CppClass pointers for commonly used corlib types,
   exposed through the global il2cpp_defaults below. */
typedef struct
{
    Il2CppImage *corlib;
    Il2CppClass *object_class;
    Il2CppClass *byte_class;
    Il2CppClass *void_class;
    Il2CppClass *boolean_class;
    Il2CppClass *sbyte_class;
    Il2CppClass *int16_class;
    Il2CppClass *uint16_class;
    Il2CppClass *int32_class;
    Il2CppClass *uint32_class;
    Il2CppClass *int_class;
    Il2CppClass *uint_class;
    Il2CppClass *int64_class;
    Il2CppClass *uint64_class;
    Il2CppClass *single_class;
    Il2CppClass *double_class;
    Il2CppClass *char_class;
    Il2CppClass *string_class;
    Il2CppClass *enum_class;
    Il2CppClass *array_class;
    Il2CppClass *delegate_class;
    Il2CppClass *multicastdelegate_class;
    Il2CppClass *asyncresult_class;
    Il2CppClass *manualresetevent_class;
    Il2CppClass *typehandle_class;
    Il2CppClass *fieldhandle_class;
    Il2CppClass *methodhandle_class;
    Il2CppClass *systemtype_class;
    Il2CppClass *monotype_class;
    Il2CppClass *exception_class;
    Il2CppClass *threadabortexception_class;
    Il2CppClass *thread_class;
    Il2CppClass *appdomain_class;
    Il2CppClass *appdomain_setup_class;
    Il2CppClass *field_info_class;
    Il2CppClass *method_info_class;
    Il2CppClass *property_info_class;
    Il2CppClass *event_info_class;
    Il2CppClass *mono_event_info_class;
    Il2CppClass *stringbuilder_class;
    Il2CppClass *stack_frame_class;
    Il2CppClass *stack_trace_class;
    Il2CppClass *marshal_class;
    Il2CppClass *typed_reference_class;
    Il2CppClass *marshalbyrefobject_class;
    Il2CppClass *generic_ilist_class;
    Il2CppClass *generic_icollection_class;
    Il2CppClass *generic_ienumerable_class;
    Il2CppClass *generic_nullable_class;
    Il2CppClass *il2cpp_com_object_class;
    Il2CppClass *customattribute_data_class;
    Il2CppClass *version;
    Il2CppClass *culture_info;
    Il2CppClass *async_call_class;
    Il2CppClass *assembly_class;
    Il2CppClass *assembly_name_class;
    Il2CppClass *enum_info_class;
    Il2CppClass *mono_field_class;
    Il2CppClass *mono_method_class;
    Il2CppClass *mono_method_info_class;
    Il2CppClass *mono_property_info_class;
    Il2CppClass *parameter_info_class;
    Il2CppClass *module_class;
    Il2CppClass *pointer_class;
    Il2CppClass *system_exception_class;
    Il2CppClass *argument_exception_class;
    Il2CppClass *wait_handle_class;
    Il2CppClass *safe_handle_class;
    Il2CppClass *sort_key_class;
    Il2CppClass *dbnull_class;
    Il2CppClass *error_wrapper_class;
    Il2CppClass *missing_class;
    Il2CppClass *value_type_class;
    Il2CppClass* ireference_class;
    Il2CppClass* ikey_value_pair_class;
    Il2CppClass* key_value_pair_class;
    Il2CppClass* windows_foundation_uri_class;
    Il2CppClass* windows_foundation_iuri_runtime_class_class;
    Il2CppClass* system_uri_class;
} Il2CppDefaults;
extern Il2CppDefaults il2cpp_defaults;
typedef struct Il2CppClass Il2CppClass;
typedef struct MethodInfo MethodInfo;
typedef struct FieldInfo FieldInfo;
typedef struct Il2CppObject Il2CppObject;
typedef struct MemberInfo MemberInfo;
typedef struct CustomAttributesCache
{
    int count;
    Il2CppObject** attributes;
} CustomAttributesCache;
typedef struct CustomAttributeTypeCache
{
    int count;
    Il2CppClass** attributeTypes;
} CustomAttributeTypeCache;
typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*);
const int THREAD_STATIC_FIELD_OFFSET = -1;
/* Runtime reflection record for a field. */
typedef struct FieldInfo
{
    const char* name;
    const Il2CppType* type;
    Il2CppClass *parent;
    int32_t offset; /* NOTE(review): THREAD_STATIC_FIELD_OFFSET (-1) appears to mark thread-static fields -- confirm */
    CustomAttributeIndex customAttributeIndex;
    uint32_t token;
} FieldInfo;
typedef struct PropertyInfo
{
    Il2CppClass *parent;
    const char *name;
    const MethodInfo *get;
    const MethodInfo *set;
    uint32_t attrs;
    CustomAttributeIndex customAttributeIndex;
    uint32_t token;
} PropertyInfo;
typedef struct EventInfo
{
    const char* name;
    const Il2CppType* eventType;
    Il2CppClass* parent;
    const MethodInfo* add;
    const MethodInfo* remove;
    const MethodInfo* raise;
    CustomAttributeIndex customAttributeIndex;
    uint32_t token;
} EventInfo;
typedef struct ParameterInfo
{
    const char* name;
    int32_t position;
    uint32_t token;
    CustomAttributeIndex customAttributeIndex;
    const Il2CppType* parameter_type;
} ParameterInfo;
typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**);
typedef union Il2CppRGCTXData
{
    void* rgctxDataDummy;
    const MethodInfo* method;
    const Il2CppType* type;
    Il2CppClass* klass;
} Il2CppRGCTXData;
/* Runtime reflection record for a method; layout must match the target runtime. */
typedef struct MethodInfo
{
    Il2CppMethodPointer methodPointer;
    InvokerMethod invoker_method;
    const char* name;
    Il2CppClass *declaring_type;
    const Il2CppType *return_type;
    const ParameterInfo* parameters;
    union
    {
        const Il2CppRGCTXData* rgctx_data;
        const Il2CppMethodDefinition* methodDefinition;
    };
    union
    {
        const Il2CppGenericMethod* genericMethod;
        const Il2CppGenericContainer* genericContainer;
    };
    CustomAttributeIndex customAttributeIndex;
    uint32_t token;
    uint16_t flags;
    uint16_t iflags;
    uint16_t slot;
    uint8_t parameters_count;
    uint8_t is_generic : 1;
    uint8_t is_inflated : 1;
} MethodInfo;
typedef struct Il2CppRuntimeInterfaceOffsetPair
{
    Il2CppClass* interfaceType;
    int32_t offset;
} Il2CppRuntimeInterfaceOffsetPair;
/* P/Invoke marshaling hooks generated per interop type. */
typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure);
typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure);
typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure);
typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj);
typedef struct Il2CppInteropData
{
    Il2CppMethodPointer delegatePInvokeWrapperFunction;
    PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction;
    PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction;
    PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction;
    CreateCCWFunc createCCWFunction;
    const Il2CppGuid* guid;
    const Il2CppType* type;
} Il2CppInteropData;
/* Runtime class object. Field order mirrors the target Unity version's
   internal layout -- tools reading process memory rely on it exactly. */
typedef struct Il2CppClass
{
    const Il2CppImage* image;
    void* gc_desc;
    const char* name;
    const char* namespaze;
    const Il2CppType* byval_arg;
    const Il2CppType* this_arg;
    Il2CppClass* element_class;
    Il2CppClass* castClass;
    Il2CppClass* declaringType;
    Il2CppClass* parent;
    Il2CppGenericClass *generic_class;
    const Il2CppTypeDefinition* typeDefinition;
    const Il2CppInteropData* interopData;
    FieldInfo* fields;
    const EventInfo* events;
    const PropertyInfo* properties;
    const MethodInfo** methods;
    Il2CppClass** nestedTypes;
    Il2CppClass** implementedInterfaces;
    Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets;
    void* static_fields;
    const Il2CppRGCTXData* rgctx_data;
    Il2CppClass** typeHierarchy;
    uint32_t cctor_started;
    uint32_t cctor_finished;
    __attribute__((aligned(8))) uint64_t cctor_thread;
    GenericContainerIndex genericContainerIndex;
    CustomAttributeIndex customAttributeIndex;
    uint32_t instance_size;
    uint32_t actualSize;
    uint32_t element_size;
    int32_t native_size;
    uint32_t static_fields_size;
    uint32_t thread_static_fields_size;
    int32_t thread_static_fields_offset;
    uint32_t flags;
    uint32_t token;
    uint16_t method_count;
    uint16_t property_count;
    uint16_t field_count;
    uint16_t event_count;
    uint16_t nested_type_count;
    uint16_t vtable_count;
    uint16_t interfaces_count;
    uint16_t interface_offsets_count;
    uint8_t typeHierarchyDepth;
    uint8_t genericRecursionDepth;
    uint8_t rank;
    uint8_t minimumAlignment;
    uint8_t packingSize;
    uint8_t valuetype : 1;
    uint8_t initialized : 1;
    uint8_t enumtype : 1;
    uint8_t is_generic : 1;
    uint8_t has_references : 1;
    uint8_t init_pending : 1;
    uint8_t size_inited : 1;
    uint8_t has_finalize : 1;
    uint8_t has_cctor : 1;
    uint8_t is_blittable : 1;
    uint8_t is_import_or_windows_runtime : 1;
    uint8_t is_vtable_initialized : 1;
    /* struct body continues on the next source line */
    VirtualInvokeData vtable[32]; /* NOTE(review): fixed 32 here; trailing vtable length varies per class -- confirm against the target runtime */
} Il2CppClass;
typedef struct Il2CppTypeDefinitionSizes
{
    uint32_t instance_size;
    int32_t native_size;
    uint32_t static_fields_size;
    uint32_t thread_static_fields_size;
} Il2CppTypeDefinitionSizes;
typedef struct Il2CppDomain
{
    Il2CppAppDomain* domain;
    Il2CppAppDomainSetup* setup;
    Il2CppAppContext* default_context;
    const char* friendly_name;
    uint32_t domain_id;
} Il2CppDomain;
typedef struct Il2CppImage
{
    const char* name;
    const char* nameNoExt;
    AssemblyIndex assemblyIndex;
    TypeDefinitionIndex typeStart;
    uint32_t typeCount;
    TypeDefinitionIndex exportedTypeStart;
    uint32_t exportedTypeCount;
    MethodIndex entryPointIndex;
    Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable;
    uint32_t token;
} Il2CppImage;
typedef struct Il2CppCodeGenOptions
{
    bool enablePrimitiveValueTypeGenericSharing;
} Il2CppCodeGenOptions;
/* Tables of generated code pointers registered with the runtime at startup. */
typedef struct Il2CppCodeRegistration
{
    uint32_t methodPointersCount;
    const Il2CppMethodPointer* methodPointers;
    uint32_t reversePInvokeWrapperCount;
    const Il2CppMethodPointer* reversePInvokeWrappers;
    uint32_t genericMethodPointersCount;
    const Il2CppMethodPointer* genericMethodPointers;
    uint32_t invokerPointersCount;
    const InvokerMethod* invokerPointers;
    CustomAttributeIndex customAttributeCount;
    const CustomAttributesCacheGenerator* customAttributeGenerators;
    uint32_t unresolvedVirtualCallCount;
    const Il2CppMethodPointer* unresolvedVirtualCallPointers;
    uint32_t interopDataCount;
    Il2CppInteropData* interopData;
} Il2CppCodeRegistration;
/* Tables of generated metadata registered with the runtime at startup. */
typedef struct Il2CppMetadataRegistration
{
    int32_t genericClassesCount;
    Il2CppGenericClass* const * genericClasses;
    int32_t genericInstsCount;
    const Il2CppGenericInst* const * genericInsts;
    int32_t genericMethodTableCount;
    const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable;
    int32_t typesCount;
    const Il2CppType* const * types;
    int32_t methodSpecsCount;
    const Il2CppMethodSpec* methodSpecs;
    FieldIndex fieldOffsetsCount;
    const int32_t** fieldOffsets;
    TypeDefinitionIndex typeDefinitionsSizesCount;
    const Il2CppTypeDefinitionSizes** typeDefinitionsSizes;
    const size_t metadataUsagesCount;
    void** const* metadataUsages;
} Il2CppMetadataRegistration;
typedef struct Il2CppRuntimeStats
{
    uint64_t new_object_count;
    uint64_t initialized_class_count;
    uint64_t method_count;
    uint64_t class_static_data_size;
    uint64_t generic_instance_count;
    uint64_t generic_class_count;
    uint64_t inflated_method_count;
    uint64_t inflated_type_count;
    bool enabled;
} Il2CppRuntimeStats;
extern Il2CppRuntimeStats il2cpp_runtime_stats;
typedef struct Il2CppPerfCounters
{
    uint32_t jit_methods;
    uint32_t jit_bytes;
    uint32_t jit_time;
    uint32_t jit_failures;
    uint32_t exceptions_thrown;
    uint32_t exceptions_filters;
    uint32_t exceptions_finallys;
    uint32_t exceptions_depth;
    uint32_t aspnet_requests_queued;
    uint32_t aspnet_requests;
    uint32_t gc_collections0;
    uint32_t gc_collections1;
    uint32_t gc_collections2;
    uint32_t gc_promotions0;
    uint32_t gc_promotions1;
    uint32_t gc_promotion_finalizers;
    uint32_t gc_gen0size;
    uint32_t gc_gen1size;
    uint32_t gc_gen2size;
    uint32_t gc_lossize;
    uint32_t gc_fin_survivors;
    uint32_t gc_num_handles;
    uint32_t gc_allocated;
    uint32_t gc_induced;
    uint32_t gc_time;
    uint32_t gc_total_bytes;
    uint32_t gc_committed_bytes;
    uint32_t gc_reserved_bytes;
    uint32_t gc_num_pinned;
    uint32_t gc_sync_blocks;
    uint32_t remoting_calls;
    uint32_t remoting_channels;
    uint32_t remoting_proxies;
    uint32_t remoting_classes;
    uint32_t remoting_objects;
    uint32_t remoting_contexts;
    uint32_t loader_classes;
    uint32_t loader_total_classes;
    uint32_t loader_appdomains;
    uint32_t loader_total_appdomains;
    uint32_t loader_assemblies;
    uint32_t loader_total_assemblies;
    uint32_t loader_failures;
    uint32_t loader_bytes;
    uint32_t loader_appdomains_uloaded; /* [sic] -- name kept as in upstream */
    uint32_t thread_contentions;
    uint32_t thread_queue_len;
    uint32_t thread_queue_max;
    uint32_t thread_num_logical;
    uint32_t thread_num_physical;
    uint32_t thread_cur_recognized;
    uint32_t thread_num_recognized;
    uint32_t interop_num_ccw;
    uint32_t interop_num_stubs;
    uint32_t interop_num_marshals;
    uint32_t security_num_checks;
    uint32_t security_num_link_checks;
    uint32_t security_time;
    uint32_t security_depth;
    uint32_t unused;
    uint64_t threadpool_workitems;
    uint64_t threadpool_ioworkitems;
    unsigned int threadpool_threads;
    unsigned int threadpool_iothreads;
} Il2CppPerfCounters;

struct MonitorData;
/* Header shared by every managed object: class pointer + monitor (lock) word. */
struct Il2CppObject {
    struct Il2CppClass *klass;
    struct MonitorData *monitor;
};
typedef int32_t il2cpp_array_lower_bound_t;
struct Il2CppArrayBounds {
    il2cpp_array_size_t length;
    il2cpp_array_lower_bound_t lower_bound;
};
struct Il2CppArray {
    struct Il2CppObject obj;
    struct Il2CppArrayBounds *bounds;
    il2cpp_array_size_t max_length;
    /* vector must be 8-byte aligned.
    On 64-bit platforms, this happens naturally.
    On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned.
    */
    void *vector[32];
};
struct Il2CppString {
    struct Il2CppObject object;
    int32_t length;
    uint16_t chars[32]; /* NOTE(review): fixed 32 here; actual character data is 'length' UTF-16 units -- confirm */
};
diff --git a/module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-api-functions.h
new file mode 100644
index 00000000..2dc7fb31
--- /dev/null
+++ b/module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-api-functions.h
@@ -0,0 +1,268 @@
/* X-macro list of the IL2CPP C API exports for this Unity version;
   the including file defines DO_API to declare, resolve or hook each entry. */
#ifndef DO_API_NO_RETURN
#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p)
#endif


DO_API(void, il2cpp_init, (const char* domain_name));
DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name));
DO_API(void, il2cpp_shutdown, ());
DO_API(void, il2cpp_set_config_dir, (const char *config_path));
DO_API(void, il2cpp_set_data_dir, (const char *data_path));
DO_API(void, il2cpp_set_temp_dir, (const char *temp_path));
DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir));
DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir));
DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath));
DO_API(void, il2cpp_set_config, (const char* executablePath));

DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks));
DO_API(const Il2CppImage*, il2cpp_get_corlib, ());
DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method));
DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name));

DO_API(void*, il2cpp_alloc, (size_t size));
DO_API(void, il2cpp_free, (void* ptr));

// array
DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank));
DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array));
DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array));
DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length));
DO_API(Il2CppArray*, il2cpp_array_new_specific, /* argument list continues on the next source line */
(Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); /* completes il2cpp_array_new_specific begun on the previous source line */
DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds));
DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded));
DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class));

// assembly
DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly));

// class
DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass));
DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass));
DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass));
DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass));
DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces));
DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc));
DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type));
DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name));
DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type));
DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass));
/* The *_get_* enumerators below take an opaque iterator cursor via 'iter'. */
DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter));
DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter));
DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter));
DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter));
DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter));
DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name));
DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name));
DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter));
DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount));
DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass));
DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass));
DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass));
DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass));
DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass));
DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass));
DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass));
DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align));
DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass));
DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass));
DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass));
DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass));
DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass));
DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type));
DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass));
DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class));
DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass));
DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass));
DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass));
DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass));

// testing only
DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass));
DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap));

// stats
DO_API(bool, il2cpp_stats_dump_to_file, (const char *path));
DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat));

// domain
DO_API(Il2CppDomain*, il2cpp_domain_get, ());
DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name));
DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size));

// exception
DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*));
DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg));
DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg));
DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size));
DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size));
DO_API(void, il2cpp_unhandled_exception, (Il2CppException*));

// field
DO_API(int, il2cpp_field_get_flags, (FieldInfo * field));
DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field));
DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field));
DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field));
DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field));
DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value));
DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj));
DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class));
DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value));
DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value));
DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value));
DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value));

// gc
DO_API(void, il2cpp_gc_collect, (int maxGenerations));
DO_API(int32_t, il2cpp_gc_collect_a_little, ());
DO_API(void, il2cpp_gc_disable, ());
DO_API(void, il2cpp_gc_enable, ());
DO_API(int64_t, il2cpp_gc_get_used_size, ());
DO_API(int64_t, il2cpp_gc_get_heap_size, ());

// gchandle
DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned));
DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection));
DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle));
DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle));

// liveness
DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped));
DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state));
DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state));
DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state));

// method
DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method));
DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method));
DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method));
DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass));
DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method));
DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method));
DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method));
DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method));
DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index));
DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method));
DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class));
DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo /* declaration continues past this chunk */
* method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, 
(Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(char*, il2cpp_thread_get_name, (Il2CppThread * thread, uint32_t * len)); +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, 
il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API(const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass * klass)); +DO_API(const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo * info)); +DO_API(const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument * document)); +DO_API(const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument * document)); +DO_API(const 
Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo * method)); +DO_API(const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo * info)); +DO_API(const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo * info)); +DO_API(size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset)); +DO_API(const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo * info)); +DO_API(const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo * info)); +DO_API(const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, int64_t uid, int32_t offset)); +DO_API(void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location, void *data)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location)); +#endif diff --git a/module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-class.h new file mode 100644 index 00000000..1f0b1d14 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2017.2.0f3/il2cpp-class.h @@ -0,0 +1,1027 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef 
int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + 
IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t 
new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef 
int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType(EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex(EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType 
type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + 
DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} 
Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t 
genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const 
Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} 
VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + 
Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo 
+{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + 
PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t 
is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + 
int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t 
loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-api-functions.h new file mode 100644 index 00000000..2dc7fb31 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-api-functions.h @@ -0,0 +1,268 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + + +DO_API(void, il2cpp_init, (const char* domain_name)); +DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, 
(Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * 
klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, 
il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, 
()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo 
* method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, 
(Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(char*, il2cpp_thread_get_name, (Il2CppThread * thread, uint32_t * len)); +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, 
il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo & frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API(const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass * klass)); +DO_API(const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo * info)); +DO_API(const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument * document)); +DO_API(const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument * document)); +DO_API(const 
Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo * method)); +DO_API(const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo * info)); +DO_API(const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo * info)); +DO_API(size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset)); +DO_API(const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo * info)); +DO_API(const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo * info)); +DO_API(const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo * info)); +DO_API(Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo * info, uint32_t position)); +DO_API(void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, int64_t uid, int32_t offset)); +DO_API(void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location, void *data)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo * info)); +DO_API(void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo * info, uint64_t location)); +#endif diff --git a/module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-class.h new file mode 100644 index 00000000..b7202346 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2017.2.1f1/il2cpp-class.h @@ -0,0 +1,1028 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef 
int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + 
IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t 
new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef 
int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType(EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex(EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType 
type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + 
DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} 
Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t 
genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const 
Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} 
VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + 
Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo 
+{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + 
PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t 
is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const 
Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + 
uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-api-functions.h new file mode 100644 index 00000000..5eb6b25f --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-api-functions.h @@ -0,0 +1,251 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + + +DO_API(void, il2cpp_init, (const char* domain_name)); +DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, 
(Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * 
klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * 
klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * 
value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, 
il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t 
timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, 
void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); diff --git a/module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-class.h 
b/module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-class.h new file mode 100644 index 00000000..aa6fad17 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.1.0f2/il2cpp-class.h @@ -0,0 +1,1099 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} 
Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) 
(Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const uint32_t kInvalidIl2CppMethodSlot = 65535; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef 
int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + 
IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} 
Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t 
flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + 
int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t 
type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable 
Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + 
Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; 
+ CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + const Il2CppType** m_type; + const char* m_name; + MethodVariableKind m_variableKind; + int m_start; + int m_end; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodScope +{ + int startOffset; + int endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int m_codeSize; + int m_numScopes; + Il2CppMethodScope *m_scopes; +} Il2CppMethodHeaderInfo; +typedef struct Hash16 +{ + uint8_t m_hash[16]; +} Hash16; +typedef struct Il2CppSequencePoint +{ + const Il2CppMethodExecutionContextInfo* executionContextInfos; + uint32_t executionContextInfoCount; + const Il2CppMethodHeaderInfo* header; + const MethodInfo* method; + const Il2CppClass* catchType; + const char* sourceFile; + Hash16 sourceFileHash; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + uint8_t isActive; + int id; + uint8_t tryDepth; +} Il2CppSequencePoint; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} 
Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + 
FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + 
const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t 
methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + uint8_t enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t 
loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-api-functions.h new file mode 100644 index 00000000..6a9665c0 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-api-functions.h @@ -0,0 +1,258 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + + +DO_API(void, il2cpp_init, (const char* domain_name)); +DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, 
(Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * 
klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * 
klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * 
value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, il2cpp_method_get_from_reflection, (const Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); 
+DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, 
(Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// 
stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, il2cpp_type_get_assembly_qualified_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// 
Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); diff --git a/module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-class.h new file mode 100644 index 00000000..3cd1abb5 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.2.0f2/il2cpp-class.h @@ -0,0 +1,1131 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + 
IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void 
(*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const uint32_t kInvalidIl2CppMethodSlot = 65535; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + 
IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef 
struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} 
Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef 
struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; 
+ int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t 
exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + 
CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass 
*event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; + Il2CppClass* system_guid_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} 
CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + TypeIndex typeIndex; + int32_t nameIndex; + MethodVariableKind variableKind; + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodExecutionContextInfoIndex +{ + int8_t tableIndex; + int32_t startIndex; + int32_t count; +} Il2CppMethodExecutionContextInfoIndex; +typedef struct Il2CppMethodScope +{ + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int32_t codeSize; + int32_t startScope; + int32_t numScopes; +} Il2CppMethodHeaderInfo; +typedef struct 
Il2CppSequencePointIndex +{ + uint8_t tableIndex; + int32_t index; +} Il2CppSequencePointIndex; +typedef struct Il2CppSequencePointSourceFile +{ + const char *file; + uint8_t hash[16]; +} Il2CppSequencePointSourceFile; +typedef struct Il2CppTypeSourceFilePair +{ + TypeIndex klassIndex; + int32_t sourceFileIndex; +} Il2CppTypeSourceFilePair; +typedef struct Il2CppSequencePoint +{ + MethodIndex methodIndex; + EncodedMethodIndex methodMetadataIndex; + const MethodInfo *method_; + TypeIndex catchTypeIndex; + int32_t sourceFileIndex; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + uint8_t isActive; + int32_t id; + uint8_t tryDepth; +} Il2CppSequencePoint; +typedef struct Il2CppDebuggerMetadataRegistration +{ + Il2CppMethodExecutionContextInfo** methodExecutionContextInfos; + Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes; + Il2CppMethodScope* methodScopes; + Il2CppMethodHeaderInfo* methodHeaderInfos; + Il2CppSequencePointSourceFile* sequencePointSourceFiles; + int32_t numSequencePoints; + Il2CppSequencePointIndex* sequencePointIndexes; + Il2CppSequencePoint** sequencePoints; + int32_t numTypeSourceFileEntries; + Il2CppTypeSourceFilePair* typeSourceFiles; + const char** methodExecutionContextInfoStrings; +} Il2CppDebuggerMetadataRegistration; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t 
flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t initializationExceptionGCHandle; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex 
genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + uint8_t has_initialization_error : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + 
TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + 
uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + uint8_t enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t 
security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-api-functions.h new file mode 100644 index 00000000..83c18181 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-api-functions.h @@ -0,0 +1,274 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + + +DO_API(void, il2cpp_init, (const char* domain_name)); +DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, 
il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, 
const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * 
klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); 
+DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(bool, il2cpp_gc_is_disabled, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); +DO_API(void, il2cpp_gc_wbarrier_set_field, (Il2CppObject * obj, void **targetAddress, void *object)); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, 
(Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, il2cpp_method_get_from_reflection, (const Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, 
il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); +DO_API(void, il2cpp_profiler_install_thread, (Il2CppProfileThreadFunc start, Il2CppProfileThreadFunc end)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, 
il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, 
il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, il2cpp_type_get_assembly_qualified_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +DO_API(size_t, il2cpp_image_get_class_count, (const Il2CppImage * image)); +DO_API(const Il2CppClass*, il2cpp_image_get_class, (const Il2CppImage * image, size_t index)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); + +// custom attributes +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_class, (Il2CppClass * klass)); +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_method, (const MethodInfo * method)); + +DO_API(Il2CppObject*, il2cpp_custom_attrs_get_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(bool, 
il2cpp_custom_attrs_has_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(Il2CppArray*, il2cpp_custom_attrs_construct, (Il2CppCustomAttrInfo * cinfo)); + +DO_API(void, il2cpp_custom_attrs_free, (Il2CppCustomAttrInfo * ainfo)); diff --git a/module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-class.h new file mode 100644 index 00000000..0361339e --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.3.0f2/il2cpp-class.h @@ -0,0 +1,1122 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef struct Il2CppCustomAttrInfo Il2CppCustomAttrInfo; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + 
IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void 
(*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef void (*Il2CppProfileThreadFunc) (Il2CppProfiler *prof, unsigned long tid); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const uint32_t kInvalidIl2CppMethodSlot = 65535; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + 
IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const CustomAttributeIndex kCustomAttributeIndexInvalid = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + 
kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex 
fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + 
Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; +} Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + uint32_t token; + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; 
+ int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} 
Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE, + CHARSET_NOT_SPECIFIED +} 
Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass 
*mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *attribute_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ireferencearray_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; + Il2CppClass* system_guid_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef void 
(*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + TypeIndex typeIndex; + int32_t nameIndex; + int32_t scopeIndex; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodExecutionContextInfoIndex +{ + int8_t tableIndex; + int32_t startIndex; + int32_t count; +} Il2CppMethodExecutionContextInfoIndex; +typedef struct Il2CppMethodScope +{ + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int32_t codeSize; + int32_t startScope; + int32_t numScopes; +} Il2CppMethodHeaderInfo; +typedef struct Il2CppSequencePointIndex +{ + uint8_t tableIndex; + int32_t index; +} Il2CppSequencePointIndex; +typedef struct Il2CppSequencePointSourceFile +{ + const char *file; + uint8_t hash[16]; +} Il2CppSequencePointSourceFile; +typedef struct Il2CppTypeSourceFilePair +{ + TypeIndex klassIndex; 
+ int32_t sourceFileIndex; +} Il2CppTypeSourceFilePair; +typedef struct Il2CppSequencePoint +{ + MethodIndex methodDefinitionIndex; + TypeIndex catchTypeIndex; + int32_t sourceFileIndex; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + uint8_t isActive; + int32_t id; + uint8_t tryDepth; +} Il2CppSequencePoint; +typedef struct Il2CppDebuggerMetadataRegistration +{ + Il2CppMethodExecutionContextInfo** methodExecutionContextInfos; + Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes; + Il2CppMethodScope* methodScopes; + Il2CppMethodHeaderInfo* methodHeaderInfos; + Il2CppSequencePointSourceFile* sequencePointSourceFiles; + int32_t numSequencePoints; + Il2CppSequencePointIndex* sequencePointIndexes; + Il2CppSequencePoint** sequencePoints; + int32_t numTypeSourceFileEntries; + Il2CppTypeSourceFilePair* typeSourceFiles; + const char** methodExecutionContextInfoStrings; +} Il2CppDebuggerMetadataRegistration; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* 
managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t initializationExceptionGCHandle; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t 
vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t initialized_and_no_error : 1; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + uint8_t has_initialization_error : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t 
referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + uint8_t enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t 
jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t 
il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-api-functions.h new file mode 100644 index 00000000..83c18181 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-api-functions.h @@ -0,0 +1,274 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + + +DO_API(void, il2cpp_init, (const char* domain_name)); +DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t 
size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); 
+DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * 
klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); 
+DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(bool, il2cpp_gc_is_disabled, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); +DO_API(void, il2cpp_gc_wbarrier_set_field, (Il2CppObject * obj, void **targetAddress, void *object)); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, 
il2cpp_method_get_from_reflection, (const Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); +DO_API(void, il2cpp_profiler_install_thread, (Il2CppProfileThreadFunc start, Il2CppProfileThreadFunc end)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, 
(PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, 
(const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, 
il2cpp_type_get_assembly_qualified_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +DO_API(size_t, il2cpp_image_get_class_count, (const Il2CppImage * image)); +DO_API(const Il2CppClass*, il2cpp_image_get_class, (const Il2CppImage * image, size_t index)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); + +// custom attributes +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_class, (Il2CppClass * klass)); +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_method, (const MethodInfo * method)); + +DO_API(Il2CppObject*, il2cpp_custom_attrs_get_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(bool, il2cpp_custom_attrs_has_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(Il2CppArray*, il2cpp_custom_attrs_construct, (Il2CppCustomAttrInfo * cinfo)); + +DO_API(void, il2cpp_custom_attrs_free, (Il2CppCustomAttrInfo * ainfo)); diff --git a/module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-class.h new file mode 100644 index 00000000..f083d042 --- /dev/null +++ 
b/module/src/main/cpp/il2cppapi/2018.3.8f1/il2cpp-class.h @@ -0,0 +1,1123 @@
/*
 * IL2CPP runtime structure definitions for Unity 2018.3.8f1.
 * NOTE(review): these declarations mirror the in-memory layout of the shipped
 * libil2cpp runtime. Field order, integer widths and bitfields must stay
 * byte-identical to the binary — do not reorder or "clean up" anything here.
 */

/* Opaque forward declarations for the runtime objects the API traffics in. */
typedef struct Il2CppClass Il2CppClass;
typedef struct Il2CppType Il2CppType;
typedef struct EventInfo EventInfo;
typedef struct MethodInfo MethodInfo;
typedef struct FieldInfo FieldInfo;
typedef struct PropertyInfo PropertyInfo;
typedef struct Il2CppAssembly Il2CppAssembly;
typedef struct Il2CppArray Il2CppArray;
typedef struct Il2CppDelegate Il2CppDelegate;
typedef struct Il2CppDomain Il2CppDomain;
typedef struct Il2CppImage Il2CppImage;
typedef struct Il2CppException Il2CppException;
typedef struct Il2CppProfiler Il2CppProfiler;
typedef struct Il2CppObject Il2CppObject;
typedef struct Il2CppReflectionMethod Il2CppReflectionMethod;
typedef struct Il2CppReflectionType Il2CppReflectionType;
typedef struct Il2CppString Il2CppString;
typedef struct Il2CppThread Il2CppThread;
typedef struct Il2CppAsyncResult Il2CppAsyncResult;
typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot;
typedef struct Il2CppCustomAttrInfo Il2CppCustomAttrInfo;

/* Bit flags selecting which profiler event categories are reported. */
typedef enum
{
    IL2CPP_PROFILE_NONE = 0,
    IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0,
    IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1,
    IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2,
    IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3,
    IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4,
    IL2CPP_PROFILE_INLINING = 1 << 5,
    IL2CPP_PROFILE_EXCEPTIONS = 1 << 6,
    IL2CPP_PROFILE_ALLOCATIONS = 1 << 7,
    IL2CPP_PROFILE_GC = 1 << 8,
    IL2CPP_PROFILE_THREADS = 1 << 9,
    IL2CPP_PROFILE_REMOTING = 1 << 10,
    IL2CPP_PROFILE_TRANSITIONS = 1 << 11,
    IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12,
    IL2CPP_PROFILE_COVERAGE = 1 << 13,
    IL2CPP_PROFILE_INS_COVERAGE = 1 << 14,
    IL2CPP_PROFILE_STATISTICAL = 1 << 15,
    IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16,
    IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17,
    IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18,
    IL2CPP_PROFILE_GC_MOVES = 1 << 19,
    IL2CPP_PROFILE_FILEIO = 1 << 20
} Il2CppProfileFlags;

/* Direction tag passed to Il2CppProfileFileIOFunc. */
typedef enum
{
    IL2CPP_PROFILE_FILEIO_WRITE = 0,
    IL2CPP_PROFILE_FILEIO_READ
} Il2CppProfileFileIOKind;

/* Garbage-collector phase notifications delivered to Il2CppProfileGCFunc. */
typedef enum
{
    IL2CPP_GC_EVENT_START,
    IL2CPP_GC_EVENT_MARK_START,
    IL2CPP_GC_EVENT_MARK_END,
    IL2CPP_GC_EVENT_RECLAIM_START,
    IL2CPP_GC_EVENT_RECLAIM_END,
    IL2CPP_GC_EVENT_END,
    IL2CPP_GC_EVENT_PRE_STOP_WORLD,
    IL2CPP_GC_EVENT_POST_STOP_WORLD,
    IL2CPP_GC_EVENT_PRE_START_WORLD,
    IL2CPP_GC_EVENT_POST_START_WORLD
} Il2CppGCEvent;

/* Counter identifiers (see the matching fields in Il2CppRuntimeStats). */
typedef enum
{
    IL2CPP_STAT_NEW_OBJECT_COUNT,
    IL2CPP_STAT_INITIALIZED_CLASS_COUNT,
    IL2CPP_STAT_METHOD_COUNT,
    IL2CPP_STAT_CLASS_STATIC_DATA_SIZE,
    IL2CPP_STAT_GENERIC_INSTANCE_COUNT,
    IL2CPP_STAT_GENERIC_CLASS_COUNT,
    IL2CPP_STAT_INFLATED_METHOD_COUNT,
    IL2CPP_STAT_INFLATED_TYPE_COUNT,
} Il2CppStat;

typedef enum
{
    IL2CPP_UNHANDLED_POLICY_LEGACY,
    IL2CPP_UNHANDLED_POLICY_CURRENT
} Il2CppRuntimeUnhandledExceptionPolicy;

/* One managed stack frame as reported to Il2CppFrameWalkFunc. */
typedef struct Il2CppStackFrameInfo
{
    const MethodInfo *method;
} Il2CppStackFrameInfo;

/* Host-provided replacements for the runtime's allocation primitives. */
typedef struct
{
    void* (*malloc_func)(size_t size);
    void* (*aligned_malloc_func)(size_t size, size_t alignment);
    void (*free_func)(void *ptr);
    void (*aligned_free_func)(void *ptr);
    void* (*calloc_func)(size_t nmemb, size_t size);
    void* (*realloc_func)(void *ptr, size_t size);
    void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment);
} Il2CppMemoryCallbacks;

typedef uint16_t Il2CppChar;       /* UTF-16 code unit */
typedef char Il2CppNativeChar;

/* Callback signatures used by the embedding/profiling API. */
typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata);
typedef void (*il2cpp_WorldChangedCallback)();
typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data);
typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof);
typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method);
typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass);
typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation);
/* declaration continues in the next chunk */
typedef void
(*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef void (*Il2CppProfileThreadFunc) (Il2CppProfiler *prof, unsigned long tid); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const uint32_t kInvalidIl2CppMethodSlot = 65535; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; 
typedef int32_t MethodIndex;
typedef int32_t GenericMethodIndex;
typedef int32_t PropertyIndex;
typedef int32_t EventIndex;
typedef int32_t GenericContainerIndex;
typedef int32_t GenericParameterIndex;
typedef int16_t GenericParameterConstraintIndex; /* 16-bit, unlike the others */
typedef int32_t NestedTypeIndex;
typedef int32_t InterfacesIndex;
typedef int32_t VTableIndex;
typedef int32_t InterfaceOffsetIndex;
typedef int32_t RGCTXIndex;
typedef int32_t StringIndex;
typedef int32_t StringLiteralIndex;
typedef int32_t GenericInstIndex;
typedef int32_t ImageIndex;
typedef int32_t AssemblyIndex;
typedef int32_t InteropDataIndex;

/* -1 marks "no entry" for each index kind. */
const TypeIndex kTypeIndexInvalid = -1;
const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1;
const DefaultValueDataIndex kDefaultValueIndexNull = -1;
const CustomAttributeIndex kCustomAttributeIndexInvalid = -1;
const EventIndex kEventIndexInvalid = -1;
const FieldIndex kFieldIndexInvalid = -1;
const MethodIndex kMethodIndexInvalid = -1;
const PropertyIndex kPropertyIndexInvalid = -1;
const GenericContainerIndex kGenericContainerIndexInvalid = -1;
const GenericParameterIndex kGenericParameterIndexInvalid = -1;
const RGCTXIndex kRGCTXIndexInvalid = -1;
const StringLiteralIndex kStringLiteralIndexInvalid = -1;
const InteropDataIndex kInteropDataIndexInvalid = -1;

typedef uint32_t EncodedMethodIndex;

/* Discriminates what an encoded metadata-usage entry points at. */
typedef enum Il2CppMetadataUsage
{
    kIl2CppMetadataUsageInvalid,
    kIl2CppMetadataUsageTypeInfo,
    kIl2CppMetadataUsageIl2CppType,
    kIl2CppMetadataUsageMethodDef,
    kIl2CppMetadataUsageFieldInfo,
    kIl2CppMetadataUsageStringLiteral,
    kIl2CppMetadataUsageMethodRef,
} Il2CppMetadataUsage;

typedef struct Il2CppImage Il2CppImage; /* duplicate fwd decls kept as upstream */
typedef struct Il2CppType Il2CppType;
typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata;

/* Payload of one RGCTX (runtime generic context) slot definition. */
typedef union Il2CppRGCTXDefinitionData
{
    int32_t rgctxDataDummy;
    MethodIndex methodIndex;
    TypeIndex typeIndex;
} Il2CppRGCTXDefinitionData;

typedef enum Il2CppRGCTXDataType
{
    IL2CPP_RGCTX_DATA_INVALID,
    IL2CPP_RGCTX_DATA_TYPE,
    IL2CPP_RGCTX_DATA_CLASS,
    IL2CPP_RGCTX_DATA_METHOD,
    IL2CPP_RGCTX_DATA_ARRAY,
} Il2CppRGCTXDataType;

typedef struct Il2CppRGCTXDefinition
{
    Il2CppRGCTXDataType type;
    Il2CppRGCTXDefinitionData data;
} Il2CppRGCTXDefinition;

typedef struct Il2CppInterfaceOffsetPair
{
    TypeIndex interfaceTypeIndex;
    int32_t offset;
} Il2CppInterfaceOffsetPair;

/* One row of the type-definition table in global-metadata. */
typedef struct Il2CppTypeDefinition
{
    StringIndex nameIndex;
    StringIndex namespaceIndex;
    TypeIndex byvalTypeIndex;
    TypeIndex byrefTypeIndex;
    TypeIndex declaringTypeIndex;
    TypeIndex parentIndex;
    TypeIndex elementTypeIndex;
    RGCTXIndex rgctxStartIndex;
    int32_t rgctxCount;
    GenericContainerIndex genericContainerIndex;
    uint32_t flags;
    /* start indices + counts into the member tables below */
    FieldIndex fieldStart;
    MethodIndex methodStart;
    EventIndex eventStart;
    PropertyIndex propertyStart;
    NestedTypeIndex nestedTypesStart;
    InterfacesIndex interfacesStart;
    VTableIndex vtableStart;
    InterfacesIndex interfaceOffsetsStart;
    uint16_t method_count;
    uint16_t property_count;
    uint16_t field_count;
    uint16_t event_count;
    uint16_t nested_type_count;
    uint16_t vtable_count;
    uint16_t interfaces_count;
    uint16_t interface_offsets_count;
    uint32_t bitfield;
    uint32_t token;
} Il2CppTypeDefinition;

typedef struct Il2CppFieldDefinition
{
    StringIndex nameIndex;
    TypeIndex typeIndex;
    uint32_t token;
} Il2CppFieldDefinition;

typedef struct Il2CppFieldDefaultValue
{
    FieldIndex fieldIndex;
    TypeIndex typeIndex;
    DefaultValueDataIndex dataIndex;
} Il2CppFieldDefaultValue;

typedef struct Il2CppFieldMarshaledSize
{
    FieldIndex fieldIndex;
    TypeIndex typeIndex;
    int32_t size;
} Il2CppFieldMarshaledSize;

typedef struct Il2CppFieldRef
{
    TypeIndex typeIndex;
    FieldIndex fieldIndex;
} Il2CppFieldRef;

typedef struct Il2CppParameterDefinition
{
    StringIndex nameIndex;
    uint32_t token;
    TypeIndex typeIndex;
} Il2CppParameterDefinition;

/* declaration continues in the next chunk */
typedef struct
Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct 
Il2CppImageDefinition
{
    StringIndex nameIndex;
    AssemblyIndex assemblyIndex;
    TypeDefinitionIndex typeStart;
    uint32_t typeCount;
    TypeDefinitionIndex exportedTypeStart;
    uint32_t exportedTypeCount;
    MethodIndex entryPointIndex;
    uint32_t token;
    CustomAttributeIndex customAttributeStart;
    uint32_t customAttributeCount;
} Il2CppImageDefinition;

typedef struct Il2CppAssemblyDefinition
{
    ImageIndex imageIndex;
    uint32_t token;
    int32_t referencedAssemblyStart;
    int32_t referencedAssemblyCount;
    Il2CppAssemblyNameDefinition aname;
} Il2CppAssemblyDefinition;

typedef struct Il2CppMetadataUsageList
{
    uint32_t start;
    uint32_t count;
} Il2CppMetadataUsageList;

typedef struct Il2CppMetadataUsagePair
{
    uint32_t destinationIndex;
    uint32_t encodedSourceIndex;
} Il2CppMetadataUsagePair;

typedef struct Il2CppCustomAttributeTypeRange
{
    uint32_t token;
    int32_t start;
    int32_t count;
} Il2CppCustomAttributeTypeRange;

typedef struct Il2CppRange
{
    int32_t start;
    int32_t length;
} Il2CppRange;

typedef struct Il2CppWindowsRuntimeTypeNamePair
{
    StringIndex nameIndex;
    TypeIndex typeIndex;
} Il2CppWindowsRuntimeTypeNamePair;

/* Header of the global-metadata.dat file: offset/count pairs for every
 * table. Packed to 4 bytes so the struct can be overlaid on the raw file. */
#pragma pack(push, p1,4)
typedef struct Il2CppGlobalMetadataHeader
{
    int32_t sanity;   /* magic value */
    int32_t version;  /* metadata format version */
    int32_t stringLiteralOffset;
    int32_t stringLiteralCount;
    int32_t stringLiteralDataOffset;
    int32_t stringLiteralDataCount;
    int32_t stringOffset;
    int32_t stringCount;
    int32_t eventsOffset;
    int32_t eventsCount;
    int32_t propertiesOffset;
    int32_t propertiesCount;
    int32_t methodsOffset;
    int32_t methodsCount;
    int32_t parameterDefaultValuesOffset;
    int32_t parameterDefaultValuesCount;
    int32_t fieldDefaultValuesOffset;
    int32_t fieldDefaultValuesCount;
    int32_t fieldAndParameterDefaultValueDataOffset;
    int32_t fieldAndParameterDefaultValueDataCount;
    int32_t fieldMarshaledSizesOffset;
    int32_t fieldMarshaledSizesCount;
    int32_t parametersOffset;
    int32_t parametersCount;
    int32_t fieldsOffset;
    int32_t fieldsCount;
    int32_t genericParametersOffset;
    int32_t genericParametersCount;
    int32_t genericParameterConstraintsOffset;
    int32_t genericParameterConstraintsCount;
    int32_t genericContainersOffset;
    int32_t genericContainersCount;
    int32_t nestedTypesOffset;
    int32_t nestedTypesCount;
    int32_t interfacesOffset;
    int32_t interfacesCount;
    int32_t vtableMethodsOffset;
    int32_t vtableMethodsCount;
    int32_t interfaceOffsetsOffset;
    int32_t interfaceOffsetsCount;
    int32_t typeDefinitionsOffset;
    int32_t typeDefinitionsCount;
    int32_t rgctxEntriesOffset;
    int32_t rgctxEntriesCount;
    int32_t imagesOffset;
    int32_t imagesCount;
    int32_t assembliesOffset;
    int32_t assembliesCount;
    int32_t metadataUsageListsOffset;
    int32_t metadataUsageListsCount;
    int32_t metadataUsagePairsOffset;
    int32_t metadataUsagePairsCount;
    int32_t fieldRefsOffset;
    int32_t fieldRefsCount;
    int32_t referencedAssembliesOffset;
    int32_t referencedAssembliesCount;
    int32_t attributesInfoOffset;
    int32_t attributesInfoCount;
    int32_t attributeTypesOffset;
    int32_t attributeTypesCount;
    int32_t unresolvedVirtualCallParameterTypesOffset;
    int32_t unresolvedVirtualCallParameterTypesCount;
    int32_t unresolvedVirtualCallParameterRangesOffset;
    int32_t unresolvedVirtualCallParameterRangesCount;
    int32_t windowsRuntimeTypeNamesOffset;
    int32_t windowsRuntimeTypeNamesSize;
    int32_t exportedTypeDefinitionsOffset;
    int32_t exportedTypeDefinitionsCount;
} Il2CppGlobalMetadataHeader;
#pragma pack(pop, p1)

typedef struct Il2CppClass Il2CppClass; /* duplicate fwd decls kept as upstream */
typedef struct MethodInfo MethodInfo;
typedef struct Il2CppType Il2CppType;

/* Shape of a (possibly multi-dimensional, bounded) managed array type. */
typedef struct Il2CppArrayType
{
    const Il2CppType* etype;
    uint8_t rank;
    uint8_t numsizes;
    uint8_t numlobounds;
    int *sizes;
    int *lobounds;
} Il2CppArrayType;

/* A concrete list of generic type arguments. */
typedef struct Il2CppGenericInst
{
    uint32_t type_argc;
    const Il2CppType **type_argv;
} Il2CppGenericInst;

/* declaration continues in the next chunk */
typedef struct
Il2CppGenericContext
{
    /* type arguments of the enclosing class and of the method itself */
    const Il2CppGenericInst *class_inst;
    const Il2CppGenericInst *method_inst;
} Il2CppGenericContext;

typedef struct Il2CppGenericParameter
{
    GenericContainerIndex ownerIndex;
    StringIndex nameIndex;
    GenericParameterConstraintIndex constraintsStart;
    int16_t constraintsCount;
    uint16_t num;
    uint16_t flags;
} Il2CppGenericParameter;

typedef struct Il2CppGenericContainer
{
    int32_t ownerIndex;
    int32_t type_argc;
    int32_t is_method;
    GenericParameterIndex genericParameterStart;
} Il2CppGenericContainer;

typedef struct Il2CppGenericClass
{
    TypeDefinitionIndex typeDefinitionIndex;
    Il2CppGenericContext context;
    Il2CppClass *cached_class;
} Il2CppGenericClass;

typedef struct Il2CppGenericMethod
{
    const MethodInfo* methodDefinition;
    Il2CppGenericContext context;
} Il2CppGenericMethod;

/* A managed type reference; `type` (Il2CppTypeEnum) selects the union arm. */
typedef struct Il2CppType
{
    union
    {
        void* dummy;
        TypeDefinitionIndex klassIndex;
        const Il2CppType *type;
        Il2CppArrayType *array;
        GenericParameterIndex genericParameterIndex;
        Il2CppGenericClass *generic_class;
    } data;
    unsigned int attrs : 16;
    Il2CppTypeEnum type : 8;
    unsigned int num_mods : 6;
    unsigned int byref : 1;
    unsigned int pinned : 1;
} Il2CppType;

typedef enum Il2CppCallConvention
{
    IL2CPP_CALL_DEFAULT,
    IL2CPP_CALL_C,
    IL2CPP_CALL_STDCALL,
    IL2CPP_CALL_THISCALL,
    IL2CPP_CALL_FASTCALL,
    IL2CPP_CALL_VARARG
} Il2CppCallConvention;

typedef enum Il2CppCharSet
{
    CHARSET_ANSI,
    CHARSET_UNICODE,
    CHARSET_NOT_SPECIFIED
} Il2CppCharSet;

typedef struct Il2CppClass Il2CppClass; /* duplicate fwd decls kept as upstream */
typedef struct Il2CppGuid Il2CppGuid;
typedef struct Il2CppImage Il2CppImage;
typedef struct Il2CppAppDomain Il2CppAppDomain;
typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup;
typedef struct Il2CppDelegate Il2CppDelegate;
typedef struct Il2CppAppContext Il2CppAppContext;
typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable;

/* One vtable slot: the code pointer plus its MethodInfo. */
typedef struct VirtualInvokeData
{
    Il2CppMethodPointer methodPtr;
    const MethodInfo* method;
} VirtualInvokeData;

typedef enum Il2CppTypeNameFormat
{
    IL2CPP_TYPE_NAME_FORMAT_IL,
    IL2CPP_TYPE_NAME_FORMAT_REFLECTION,
    IL2CPP_TYPE_NAME_FORMAT_FULL_NAME,
    IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED
} Il2CppTypeNameFormat;

/* Cache of frequently used corlib classes, filled in by the runtime. */
typedef struct Il2CppDefaults
{
    Il2CppImage *corlib;
    Il2CppClass *object_class;
    Il2CppClass *byte_class;
    Il2CppClass *void_class;
    Il2CppClass *boolean_class;
    Il2CppClass *sbyte_class;
    Il2CppClass *int16_class;
    Il2CppClass *uint16_class;
    Il2CppClass *int32_class;
    Il2CppClass *uint32_class;
    Il2CppClass *int_class;
    Il2CppClass *uint_class;
    Il2CppClass *int64_class;
    Il2CppClass *uint64_class;
    Il2CppClass *single_class;
    Il2CppClass *double_class;
    Il2CppClass *char_class;
    Il2CppClass *string_class;
    Il2CppClass *enum_class;
    Il2CppClass *array_class;
    Il2CppClass *delegate_class;
    Il2CppClass *multicastdelegate_class;
    Il2CppClass *asyncresult_class;
    Il2CppClass *manualresetevent_class;
    Il2CppClass *typehandle_class;
    Il2CppClass *fieldhandle_class;
    Il2CppClass *methodhandle_class;
    Il2CppClass *systemtype_class;
    Il2CppClass *monotype_class;
    Il2CppClass *exception_class;
    Il2CppClass *threadabortexception_class;
    Il2CppClass *thread_class;
    Il2CppClass *appdomain_class;
    Il2CppClass *appdomain_setup_class;
    Il2CppClass *field_info_class;
    Il2CppClass *method_info_class;
    Il2CppClass *property_info_class;
    Il2CppClass *event_info_class;
    Il2CppClass *mono_event_info_class;
    Il2CppClass *stringbuilder_class;
    Il2CppClass *stack_frame_class;
    Il2CppClass *stack_trace_class;
    Il2CppClass *marshal_class;
    Il2CppClass *typed_reference_class;
    Il2CppClass *marshalbyrefobject_class;
    Il2CppClass *generic_ilist_class;
    Il2CppClass *generic_icollection_class;
    Il2CppClass *generic_ienumerable_class;
    Il2CppClass *generic_nullable_class;
    Il2CppClass *il2cpp_com_object_class;
    /* declaration continues in the next chunk */
    Il2CppClass
    *attribute_class;
    Il2CppClass *customattribute_data_class;
    Il2CppClass *version;
    Il2CppClass *culture_info;
    Il2CppClass *async_call_class;
    Il2CppClass *assembly_class;
    Il2CppClass *assembly_name_class;
    Il2CppClass *enum_info_class;
    Il2CppClass *mono_field_class;
    Il2CppClass *mono_method_class;
    Il2CppClass *mono_method_info_class;
    Il2CppClass *mono_property_info_class;
    Il2CppClass *parameter_info_class;
    Il2CppClass *module_class;
    Il2CppClass *pointer_class;
    Il2CppClass *system_exception_class;
    Il2CppClass *argument_exception_class;
    Il2CppClass *wait_handle_class;
    Il2CppClass *safe_handle_class;
    Il2CppClass *sort_key_class;
    Il2CppClass *dbnull_class;
    Il2CppClass *error_wrapper_class;
    Il2CppClass *missing_class;
    Il2CppClass *value_type_class;
    /* Windows Runtime / interop related classes. */
    Il2CppClass* ireference_class;
    Il2CppClass* ireferencearray_class;
    Il2CppClass* ikey_value_pair_class;
    Il2CppClass* key_value_pair_class;
    Il2CppClass* windows_foundation_uri_class;
    Il2CppClass* windows_foundation_iuri_runtime_class_class;
    Il2CppClass* system_uri_class;
    Il2CppClass* system_guid_class;
} Il2CppDefaults;

extern Il2CppDefaults il2cpp_defaults;

typedef struct Il2CppClass Il2CppClass; /* duplicate fwd decls kept as upstream */
typedef struct MethodInfo MethodInfo;
typedef struct FieldInfo FieldInfo;
typedef struct Il2CppObject Il2CppObject;
typedef struct MemberInfo MemberInfo;

/* Materialized custom-attribute instances for one member. */
typedef struct CustomAttributesCache
{
    int count;
    Il2CppObject** attributes;
} CustomAttributesCache;

typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*);

/* Sentinel in FieldInfo.offset marking a thread-static field. */
const int THREAD_STATIC_FIELD_OFFSET = -1;

/* Runtime reflection descriptors for fields/properties/events/parameters. */
typedef struct FieldInfo
{
    const char* name;
    const Il2CppType* type;
    Il2CppClass *parent;
    int32_t offset;   /* byte offset within the instance, or the sentinel above */
    uint32_t token;
} FieldInfo;

typedef struct PropertyInfo
{
    Il2CppClass *parent;
    const char *name;
    const MethodInfo *get;
    const MethodInfo *set;
    uint32_t attrs;
    uint32_t token;
} PropertyInfo;

typedef struct EventInfo
{
    const char* name;
    const Il2CppType* eventType;
    Il2CppClass* parent;
    const MethodInfo* add;
    const MethodInfo* remove;
    const MethodInfo* raise;
    uint32_t token;
} EventInfo;

typedef struct ParameterInfo
{
    const char* name;
    int32_t position;
    uint32_t token;
    const Il2CppType* parameter_type;
} ParameterInfo;

typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**);

/* Debugger support metadata (sequence points, scopes, execution contexts). */
typedef enum MethodVariableKind
{
    kMethodVariableKind_This,
    kMethodVariableKind_Parameter,
    kMethodVariableKind_LocalVariable
} MethodVariableKind;

typedef enum SequencePointKind
{
    kSequencePointKind_Normal,
    kSequencePointKind_StepOut
} SequencePointKind;

typedef struct Il2CppMethodExecutionContextInfo
{
    TypeIndex typeIndex;
    int32_t nameIndex;
    int32_t scopeIndex;
} Il2CppMethodExecutionContextInfo;

typedef struct Il2CppMethodExecutionContextInfoIndex
{
    int8_t tableIndex;
    int32_t startIndex;
    int32_t count;
} Il2CppMethodExecutionContextInfoIndex;

typedef struct Il2CppMethodScope
{
    int32_t startOffset;
    int32_t endOffset;
} Il2CppMethodScope;

typedef struct Il2CppMethodHeaderInfo
{
    int32_t codeSize;
    int32_t startScope;
    int32_t numScopes;
} Il2CppMethodHeaderInfo;

typedef struct Il2CppSequencePointIndex
{
    uint8_t tableIndex;
    int32_t index;
} Il2CppSequencePointIndex;

typedef struct Il2CppSequencePointSourceFile
{
    const char *file;
    uint8_t hash[16];
} Il2CppSequencePointSourceFile;

typedef struct Il2CppTypeSourceFilePair
{
    TypeIndex klassIndex;
    int32_t sourceFileIndex;
} Il2CppTypeSourceFilePair;

typedef struct Il2CppSequencePoint
{
    MethodIndex methodDefinitionIndex;
    TypeIndex catchTypeIndex;
    int32_t sourceFileIndex;
    int32_t lineStart, lineEnd;
    int32_t columnStart, columnEnd;
    int32_t ilOffset;
    SequencePointKind kind;
    uint8_t isActive;
    int32_t id;
    uint8_t tryDepth;
} Il2CppSequencePoint;

typedef struct Il2CppDebuggerMetadataRegistration
{
    Il2CppMethodExecutionContextInfo** methodExecutionContextInfos;
    Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes;
    Il2CppMethodScope* methodScopes;
    Il2CppMethodHeaderInfo* methodHeaderInfos;
    Il2CppSequencePointSourceFile* sequencePointSourceFiles;
    int32_t numSequencePoints;
    Il2CppSequencePointIndex* sequencePointIndexes;
    Il2CppSequencePoint** sequencePoints;
    int32_t numTypeSourceFileEntries;
    Il2CppTypeSourceFilePair* typeSourceFiles;
    const char** methodExecutionContextInfoStrings;
} Il2CppDebuggerMetadataRegistration;

/* One resolved RGCTX slot at runtime. */
typedef union Il2CppRGCTXData
{
    void* rgctxDataDummy;
    const MethodInfo* method;
    const Il2CppType* type;
    Il2CppClass* klass;
} Il2CppRGCTXData;

/* Runtime descriptor of a managed method. */
typedef struct MethodInfo
{
    Il2CppMethodPointer methodPointer;   /* compiled native entry point */
    InvokerMethod invoker_method;
    const char* name;
    Il2CppClass *klass;
    const Il2CppType *return_type;
    const ParameterInfo* parameters;
    union
    {
        const Il2CppRGCTXData* rgctx_data;           /* inflated methods */
        const Il2CppMethodDefinition* methodDefinition;
    };
    union
    {
        const Il2CppGenericMethod* genericMethod;       /* inflated */
        const Il2CppGenericContainer* genericContainer; /* generic definition */
    };
    uint32_t token;
    uint16_t flags;
    uint16_t iflags;
    uint16_t slot;
    uint8_t parameters_count;
    uint8_t is_generic : 1;
    uint8_t is_inflated : 1;
    uint8_t wrapper_type : 1;
    uint8_t is_marshaled_from_native : 1;
} MethodInfo;

typedef struct Il2CppRuntimeInterfaceOffsetPair
{
    Il2CppClass* interfaceType;
    int32_t offset;
} Il2CppRuntimeInterfaceOffsetPair;

/* P/Invoke and COM interop helper signatures. */
typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure);
typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure);
typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure);
typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj);

typedef struct Il2CppInteropData
{
    Il2CppMethodPointer delegatePInvokeWrapperFunction;
    PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction;
    PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction;
    PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction;
    CreateCCWFunc createCCWFunction;
    const Il2CppGuid* guid;
    const Il2CppType* type;
} Il2CppInteropData;

/* Runtime descriptor of a managed class — the central structure of this
 * header. The trailing vtable is variable-length (declared [32] here). */
typedef struct Il2CppClass
{
    const Il2CppImage* image;
    void* gc_desc;
    const char* name;
    const char* namespaze;
    Il2CppType byval_arg;
    Il2CppType this_arg;
    Il2CppClass* element_class;
    Il2CppClass* castClass;
    Il2CppClass* declaringType;
    Il2CppClass* parent;
    Il2CppGenericClass *generic_class;
    const Il2CppTypeDefinition* typeDefinition;
    const Il2CppInteropData* interopData;
    Il2CppClass* klass;
    /* member tables */
    FieldInfo* fields;
    const EventInfo* events;
    const PropertyInfo* properties;
    const MethodInfo** methods;
    Il2CppClass** nestedTypes;
    Il2CppClass** implementedInterfaces;
    Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets;
    void* static_fields;
    const Il2CppRGCTXData* rgctx_data;
    Il2CppClass** typeHierarchy;
    uint32_t initializationExceptionGCHandle;
    uint32_t cctor_started;
    uint32_t cctor_finished;
    __attribute__((aligned(8))) uint64_t cctor_thread;
    GenericContainerIndex genericContainerIndex;
    uint32_t instance_size;
    uint32_t actualSize;
    uint32_t element_size;
    int32_t native_size;
    uint32_t static_fields_size;
    uint32_t thread_static_fields_size;
    int32_t thread_static_fields_offset;
    uint32_t flags;
    uint32_t token;
    uint16_t method_count;
    uint16_t property_count;
    uint16_t field_count;
    uint16_t event_count;
    uint16_t nested_type_count;
    uint16_t vtable_count;
    uint16_t interfaces_count;
    uint16_t interface_offsets_count;
    uint8_t typeHierarchyDepth;
    uint8_t genericRecursionDepth;
    uint8_t rank;
    uint8_t minimumAlignment;
    uint8_t naturalAligment;   /* [sic] upstream spelling — do not rename */
    uint8_t packingSize;
    uint8_t initialized_and_no_error : 1;
    uint8_t valuetype : 1;
    uint8_t initialized : 1;
    uint8_t enumtype : 1;
    uint8_t is_generic : 1;
    uint8_t has_references : 1;
    uint8_t init_pending : 1;
    uint8_t size_inited : 1;
    uint8_t has_finalize : 1;
    uint8_t has_cctor : 1;
    uint8_t is_blittable : 1;
    uint8_t is_import_or_windows_runtime : 1;
    uint8_t is_vtable_initialized : 1;
    uint8_t has_initialization_error : 1;
    VirtualInvokeData vtable[32];
} Il2CppClass;

typedef struct Il2CppTypeDefinitionSizes
{
    uint32_t instance_size;
    int32_t native_size;
    uint32_t static_fields_size;
    uint32_t thread_static_fields_size;
} Il2CppTypeDefinitionSizes;

typedef struct Il2CppDomain
{
    Il2CppAppDomain* domain;
    Il2CppAppDomainSetup* setup;
    Il2CppAppContext* default_context;
    const char* friendly_name;
    uint32_t domain_id;
    void* agent_info;
} Il2CppDomain;

/* Runtime (string-pointer) counterpart of Il2CppAssemblyNameDefinition. */
typedef struct Il2CppAssemblyName
{
    const char* name;
    const char* culture;
    const char* hash_value;
    const char* public_key;
    uint32_t hash_alg;
    int32_t hash_len;
    uint32_t flags;
    int32_t major;
    int32_t minor;
    int32_t build;
    int32_t revision;
    uint8_t public_key_token[8];
} Il2CppAssemblyName;

typedef struct Il2CppImage
{
    const char* name;
    const char *nameNoExt;
    Il2CppAssembly* assembly;
    TypeDefinitionIndex typeStart;
    uint32_t typeCount;
    TypeDefinitionIndex exportedTypeStart;
    uint32_t exportedTypeCount;
    CustomAttributeIndex customAttributeStart;
    uint32_t customAttributeCount;
    MethodIndex entryPointIndex;
    Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable;
    uint32_t token;
    uint8_t dynamic;
} Il2CppImage;

typedef struct Il2CppAssembly
{
    Il2CppImage* image;
    uint32_t token;
    int32_t referencedAssemblyStart;
    int32_t referencedAssemblyCount;
    Il2CppAssemblyName aname;
} Il2CppAssembly;

typedef struct Il2CppCodeGenOptions
{
    uint8_t enablePrimitiveValueTypeGenericSharing;
} Il2CppCodeGenOptions;

/* Tables of generated code pointers registered by the compiled assembly. */
typedef struct Il2CppCodeRegistration
{
    uint32_t methodPointersCount;
    const Il2CppMethodPointer* methodPointers;
    uint32_t reversePInvokeWrapperCount;
    const Il2CppMethodPointer* reversePInvokeWrappers;
    uint32_t genericMethodPointersCount;
    const Il2CppMethodPointer* genericMethodPointers;
    uint32_t invokerPointersCount;
    const InvokerMethod* invokerPointers;
    CustomAttributeIndex customAttributeCount;
    const CustomAttributesCacheGenerator* customAttributeGenerators;
    uint32_t unresolvedVirtualCallCount;
    const Il2CppMethodPointer* unresolvedVirtualCallPointers;
    uint32_t interopDataCount;
    Il2CppInteropData* interopData;
} Il2CppCodeRegistration;

/* Tables of runtime metadata registered by the compiled assembly. */
typedef struct Il2CppMetadataRegistration
{
    int32_t genericClassesCount;
    Il2CppGenericClass* const * genericClasses;
    int32_t genericInstsCount;
    const Il2CppGenericInst* const * genericInsts;
    int32_t genericMethodTableCount;
    const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable;
    int32_t typesCount;
    const Il2CppType* const * types;
    int32_t methodSpecsCount;
    const Il2CppMethodSpec* methodSpecs;
    FieldIndex fieldOffsetsCount;
    const int32_t** fieldOffsets;
    TypeDefinitionIndex typeDefinitionsSizesCount;
    const Il2CppTypeDefinitionSizes** typeDefinitionsSizes;
    const size_t metadataUsagesCount;
    void** const* metadataUsages;
} Il2CppMetadataRegistration;

/* Counters backing the Il2CppStat enum. */
typedef struct Il2CppRuntimeStats
{
    uint64_t new_object_count;
    uint64_t initialized_class_count;
    uint64_t method_count;
    uint64_t class_static_data_size;
    uint64_t generic_instance_count;
    uint64_t generic_class_count;
    uint64_t inflated_method_count;
    uint64_t inflated_type_count;
    uint8_t enabled;
} Il2CppRuntimeStats;

extern Il2CppRuntimeStats il2cpp_runtime_stats;

/* Legacy Mono performance-counter block. */
typedef struct Il2CppPerfCounters
{
    uint32_t jit_methods;
    uint32_t jit_bytes;
    uint32_t jit_time;
    uint32_t jit_failures;
    uint32_t exceptions_thrown;
    uint32_t exceptions_filters;
    uint32_t exceptions_finallys;
    uint32_t exceptions_depth;
    uint32_t aspnet_requests_queued;
    uint32_t aspnet_requests;
    uint32_t gc_collections0;
    uint32_t gc_collections1;
    uint32_t gc_collections2;
    uint32_t gc_promotions0;
    uint32_t gc_promotions1;
    uint32_t gc_promotion_finalizers;
    uint32_t gc_gen0size;
    uint32_t gc_gen1size;
    uint32_t gc_gen2size;
    uint32_t gc_lossize;
    uint32_t gc_fin_survivors;
    uint32_t gc_num_handles;
    uint32_t gc_allocated;
    uint32_t gc_induced;
    uint32_t gc_time;
    uint32_t gc_total_bytes;
    uint32_t gc_committed_bytes;
    uint32_t gc_reserved_bytes;
    uint32_t gc_num_pinned;
    uint32_t gc_sync_blocks;
    uint32_t remoting_calls;
    uint32_t remoting_channels;
    uint32_t remoting_proxies;
    uint32_t remoting_classes;
    uint32_t remoting_objects;
    uint32_t remoting_contexts;
    uint32_t loader_classes;
    uint32_t loader_total_classes;
    uint32_t loader_appdomains;
    uint32_t loader_total_appdomains;
    uint32_t loader_assemblies;
    uint32_t loader_total_assemblies;
    uint32_t loader_failures;
    uint32_t loader_bytes;
    uint32_t loader_appdomains_uloaded; /* [sic] upstream spelling */
    uint32_t thread_contentions;
    uint32_t thread_queue_len;
    uint32_t thread_queue_max;
    uint32_t thread_num_logical;
    uint32_t thread_num_physical;
    uint32_t thread_cur_recognized;
    uint32_t thread_num_recognized;
    uint32_t interop_num_ccw;
    uint32_t interop_num_stubs;
    uint32_t interop_num_marshals;
    uint32_t security_num_checks;
    uint32_t security_num_link_checks;
    uint32_t security_time;
    uint32_t security_depth;
    uint32_t unused;
    uint64_t threadpool_workitems;
    uint64_t threadpool_ioworkitems;
    unsigned int threadpool_threads;
    unsigned int threadpool_iothreads;
} Il2CppPerfCounters;

/* Minimal managed-object layouts used by the dumper. */
struct MonitorData;
struct Il2CppObject {
    struct Il2CppClass *klass;
    struct MonitorData *monitor;
};
typedef int32_t il2cpp_array_lower_bound_t;
struct Il2CppArrayBounds {
    il2cpp_array_size_t length;
    il2cpp_array_lower_bound_t lower_bound;
};
struct Il2CppArray {
    struct Il2CppObject obj;
    struct Il2CppArrayBounds *bounds;
    il2cpp_array_size_t max_length;
    /* vector must be 8-byte aligned.
       On 64-bit platforms, this happens naturally.
       On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */
    void *vector[32];
};
struct Il2CppString {
    struct Il2CppObject object;
    int32_t length;
    uint16_t chars[32];
};
diff --git a/module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-api-functions.h
new file mode 100644
index 00000000..83c18181
--- /dev/null
+++ b/module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-api-functions.h
@@ -0,0 +1,274 @@
/* X-macro list of the exported il2cpp API for Unity 2018.4.18f1.
 * The including file defines DO_API(returnType, name, parameters) to either
 * declare or resolve each function; signatures must match the exports. */
#ifndef DO_API_NO_RETURN
#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p)
#endif

DO_API(void, il2cpp_init, (const char* domain_name));
DO_API(void, il2cpp_init_utf16, (const Il2CppChar * domain_name));
DO_API(void, il2cpp_shutdown, ());
DO_API(void, il2cpp_set_config_dir, (const char *config_path));
DO_API(void, il2cpp_set_data_dir, (const char *data_path));
DO_API(void, il2cpp_set_temp_dir, (const char *temp_path));
DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir));
DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir));
DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath));
DO_API(void, il2cpp_set_config, (const char* executablePath));

DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks));
DO_API(const Il2CppImage*, il2cpp_get_corlib, ());
DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method));
DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name));

DO_API(void*, il2cpp_alloc, (size_t size));
DO_API(void, il2cpp_free, (void* ptr));

// array
DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank));
DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array));
DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array));
DO_API(Il2CppArray*, il2cpp_array_new,
(Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, 
(Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, 
il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void 
*value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(bool, il2cpp_gc_is_disabled, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); +DO_API(void, il2cpp_gc_wbarrier_set_field, (Il2CppObject * obj, void **targetAddress, void *object)); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, il2cpp_method_get_from_reflection, (const Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); 
+DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); +DO_API(void, il2cpp_profiler_install_thread, (Il2CppProfileThreadFunc start, Il2CppProfileThreadFunc end)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const 
MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * 
str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, il2cpp_type_get_assembly_qualified_name, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const 
MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +DO_API(size_t, il2cpp_image_get_class_count, (const Il2CppImage * image)); +DO_API(const Il2CppClass*, il2cpp_image_get_class, (const Il2CppImage * image, size_t index)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); + +// custom attributes +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_class, (Il2CppClass * klass)); +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_method, (const MethodInfo * method)); + +DO_API(Il2CppObject*, il2cpp_custom_attrs_get_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(bool, il2cpp_custom_attrs_has_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(Il2CppArray*, il2cpp_custom_attrs_construct, (Il2CppCustomAttrInfo * cinfo)); + +DO_API(void, il2cpp_custom_attrs_free, (Il2CppCustomAttrInfo * ainfo)); diff --git a/module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-class.h new file mode 100644 index 00000000..4fcd9fe5 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2018.4.18f1/il2cpp-class.h @@ -0,0 +1,1130 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly 
Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef struct Il2CppCustomAttrInfo Il2CppCustomAttrInfo; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + 
IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef void (*Il2CppProfileThreadFunc) (Il2CppProfiler *prof, unsigned long tid); +typedef const Il2CppNativeChar* 
(*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const uint32_t kInvalidIl2CppMethodSlot = 65535; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; 
+typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const CustomAttributeIndex kCustomAttributeIndexInvalid = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct 
Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + 
GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; +} 
Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + uint32_t token; + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t 
interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} 
Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE, + CHARSET_NOT_SPECIFIED +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage 
*corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *attribute_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass 
*mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ireferencearray_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; + Il2CppClass* system_guid_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, 
void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + TypeIndex typeIndex; + int32_t nameIndex; + int32_t scopeIndex; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodExecutionContextInfoIndex +{ + int8_t tableIndex; + int32_t startIndex; + int32_t count; +} Il2CppMethodExecutionContextInfoIndex; +typedef struct Il2CppMethodScope +{ + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int32_t codeSize; + int32_t startScope; + int32_t numScopes; +} Il2CppMethodHeaderInfo; +typedef struct Il2CppSequencePointIndex +{ + uint8_t tableIndex; + int32_t index; +} Il2CppSequencePointIndex; +typedef struct Il2CppSequencePointSourceFile +{ + const char *file; + uint8_t hash[16]; +} Il2CppSequencePointSourceFile; +typedef struct Il2CppTypeSourceFilePair +{ + TypeIndex klassIndex; + int32_t sourceFileIndex; +} Il2CppTypeSourceFilePair; +typedef struct Il2CppSequencePoint +{ + MethodIndex methodDefinitionIndex; + TypeIndex catchTypeIndex; + int32_t sourceFileIndex; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + uint8_t isActive; + int32_t id; + uint8_t tryDepth; +} Il2CppSequencePoint; +typedef struct Il2CppDebuggerMetadataRegistration +{ + Il2CppMethodExecutionContextInfo** methodExecutionContextInfos; + Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes; + Il2CppMethodScope* methodScopes; + Il2CppMethodHeaderInfo* methodHeaderInfos; + Il2CppSequencePointSourceFile* sequencePointSourceFiles; + int32_t numSequencePoints; + Il2CppSequencePointIndex* sequencePointIndexes; + Il2CppSequencePoint** sequencePoints; + int32_t numTypeSourceFileEntries; + 
Il2CppTypeSourceFilePair* typeSourceFiles; + const char** methodExecutionContextInfoStrings; +} Il2CppDebuggerMetadataRegistration; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + 
Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t initializationExceptionGCHandle; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t naturalAligment; + uint8_t packingSize; + uint8_t initialized_and_no_error : 1; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + uint8_t has_initialization_error : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t 
thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppWindowsRuntimeFactoryTableEntry +{ + const Il2CppType* type; + Il2CppMethodPointer createFactoryFunction; +} Il2CppWindowsRuntimeFactoryTableEntry; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* 
customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; + uint32_t windowsRuntimeFactoryCount; + Il2CppWindowsRuntimeFactoryTableEntry* windowsRuntimeFactoryTable; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + uint8_t enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + 
uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-api-functions.h new file mode 100644 index 00000000..86581492 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-api-functions.h @@ -0,0 +1,283 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + +DO_API(int, il2cpp_init, (const char* domain_name)); +DO_API(int, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, 
(Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * 
klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * 
klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * 
value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(bool, il2cpp_gc_is_disabled, ()); +DO_API(int64_t, il2cpp_gc_get_max_time_slice_ns, ()); +DO_API(void, il2cpp_gc_set_max_time_slice_ns, (int64_t maxTimeSlice)); +DO_API(bool, il2cpp_gc_is_incremental, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); +DO_API(void, il2cpp_gc_wbarrier_set_field, (Il2CppObject * obj, void **targetAddress, void *object)); +DO_API(bool, il2cpp_gc_has_strict_wbarriers, ()); +DO_API(void, il2cpp_gc_set_external_allocation_tracker, (void(*func)(void*, size_t, int))); +DO_API(void, il2cpp_gc_set_external_wbarrier_tracker, (void(*func)(void**))); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, il2cpp_method_get_from_reflection, (const 
Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); +DO_API(void, il2cpp_profiler_install_thread, (Il2CppProfileThreadFunc start, Il2CppProfileThreadFunc end)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, 
il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); 
+DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, il2cpp_type_get_assembly_qualified_name, (const 
Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +DO_API(size_t, il2cpp_image_get_class_count, (const Il2CppImage * image)); +DO_API(const Il2CppClass*, il2cpp_image_get_class, (const Il2CppImage * image, size_t index)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); + +// custom attributes +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_class, (Il2CppClass * klass)); +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_method, (const MethodInfo * method)); + +DO_API(Il2CppObject*, il2cpp_custom_attrs_get_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(bool, il2cpp_custom_attrs_has_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(Il2CppArray*, il2cpp_custom_attrs_construct, (Il2CppCustomAttrInfo * cinfo)); + +DO_API(void, il2cpp_custom_attrs_free, (Il2CppCustomAttrInfo * ainfo)); + +// Il2CppClass user data for GetComponent optimization +DO_API(void, il2cpp_class_set_userdata, (Il2CppClass * klass, void* userdata)); +DO_API(int, il2cpp_class_get_userdata_offset, ()); diff --git a/module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-class.h 
b/module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-class.h new file mode 100644 index 00000000..bd0c7537 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.1.0f2/il2cpp-class.h @@ -0,0 +1,1368 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef struct Il2CppCustomAttrInfo Il2CppCustomAttrInfo; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + 
IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, 
Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef void (*Il2CppProfileThreadFunc) (Il2CppProfiler *prof, unsigned long tid); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +static const uint32_t kInvalidIl2CppMethodSlot = 65535; +static const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef enum +{ + IL2CPP_TOKEN_MODULE = 0x00000000, + IL2CPP_TOKEN_TYPE_REF = 0x01000000, + 
IL2CPP_TOKEN_TYPE_DEF = 0x02000000, + IL2CPP_TOKEN_FIELD_DEF = 0x04000000, + IL2CPP_TOKEN_METHOD_DEF = 0x06000000, + IL2CPP_TOKEN_PARAM_DEF = 0x08000000, + IL2CPP_TOKEN_INTERFACE_IMPL = 0x09000000, + IL2CPP_TOKEN_MEMBER_REF = 0x0a000000, + IL2CPP_TOKEN_CUSTOM_ATTRIBUTE = 0x0c000000, + IL2CPP_TOKEN_PERMISSION = 0x0e000000, + IL2CPP_TOKEN_SIGNATURE = 0x11000000, + IL2CPP_TOKEN_EVENT = 0x14000000, + IL2CPP_TOKEN_PROPERTY = 0x17000000, + IL2CPP_TOKEN_MODULE_REF = 0x1a000000, + IL2CPP_TOKEN_TYPE_SPEC = 0x1b000000, + IL2CPP_TOKEN_ASSEMBLY = 0x20000000, + IL2CPP_TOKEN_ASSEMBLY_REF = 0x23000000, + IL2CPP_TOKEN_FILE = 0x26000000, + IL2CPP_TOKEN_EXPORTED_TYPE = 0x27000000, + IL2CPP_TOKEN_MANIFEST_RESOURCE = 0x28000000, + IL2CPP_TOKEN_GENERIC_PARAM = 0x2a000000, + IL2CPP_TOKEN_METHOD_SPEC = 0x2b000000, +} Il2CppTokenType; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +static const TypeIndex kTypeIndexInvalid = -1; +static const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +static const DefaultValueDataIndex kDefaultValueIndexNull = -1; +static const CustomAttributeIndex kCustomAttributeIndexInvalid = -1; +static const EventIndex kEventIndexInvalid = -1; +static 
const FieldIndex kFieldIndexInvalid = -1; +static const MethodIndex kMethodIndexInvalid = -1; +static const PropertyIndex kPropertyIndexInvalid = -1; +static const GenericContainerIndex kGenericContainerIndexInvalid = -1; +static const GenericParameterIndex kGenericParameterIndexInvalid = -1; +static const RGCTXIndex kRGCTXIndexInvalid = -1; +static const StringLiteralIndex kStringLiteralIndexInvalid = -1; +static const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex 
nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + GenericContainerIndex genericContainerIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex 
methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +static const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; +} Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + uint32_t token; + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct 
Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t 
attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum 
type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE, + CHARSET_NOT_SPECIFIED +} Il2CppCharSet; +typedef struct Il2CppHString__ +{ + int unused; +} Il2CppHString__; +typedef Il2CppHString__* Il2CppHString; +typedef struct Il2CppHStringHeader +{ + union + { + void* Reserved1; + char Reserved2[24]; + } Reserved; +} Il2CppHStringHeader; +typedef struct Il2CppGuid +{ + uint32_t data1; + uint16_t data2; + uint16_t data3; + uint8_t data4[8]; +} Il2CppGuid; +typedef struct Il2CppSafeArrayBound +{ + uint32_t element_count; + int32_t lower_bound; +} Il2CppSafeArrayBound; +typedef struct Il2CppSafeArray +{ + uint16_t dimension_count; + uint16_t features; + uint32_t element_size; + uint32_t lock_count; + void* data; + Il2CppSafeArrayBound bounds[1]; +} Il2CppSafeArray; +typedef struct Il2CppWin32Decimal +{ + uint16_t reserved; + union + { + struct + { + uint8_t scale; + uint8_t sign; + } s; + uint16_t signscale; + } u; + uint32_t hi32; + union + { + struct + { + uint32_t lo32; + uint32_t mid32; + } s2; + uint64_t lo64; + } u2; +} Il2CppWin32Decimal; +typedef int16_t IL2CPP_VARIANT_BOOL; +typedef enum Il2CppVarType +{ + IL2CPP_VT_EMPTY = 0, + IL2CPP_VT_NULL = 1, + IL2CPP_VT_I2 = 2, + IL2CPP_VT_I4 = 3, + IL2CPP_VT_R4 = 4, + IL2CPP_VT_R8 = 5, + IL2CPP_VT_CY = 6, + IL2CPP_VT_DATE = 7, + IL2CPP_VT_BSTR = 8, + IL2CPP_VT_DISPATCH = 9, + IL2CPP_VT_ERROR = 10, + IL2CPP_VT_BOOL = 11, + IL2CPP_VT_VARIANT = 12, + IL2CPP_VT_UNKNOWN = 13, + IL2CPP_VT_DECIMAL = 14, + IL2CPP_VT_I1 = 16, + IL2CPP_VT_UI1 = 17, + IL2CPP_VT_UI2 = 18, + IL2CPP_VT_UI4 = 19, + IL2CPP_VT_I8 = 20, + IL2CPP_VT_UI8 = 21, + IL2CPP_VT_INT = 22, + IL2CPP_VT_UINT = 23, + IL2CPP_VT_VOID = 24, + IL2CPP_VT_HRESULT = 
25, + IL2CPP_VT_PTR = 26, + IL2CPP_VT_SAFEARRAY = 27, + IL2CPP_VT_CARRAY = 28, + IL2CPP_VT_USERDEFINED = 29, + IL2CPP_VT_LPSTR = 30, + IL2CPP_VT_LPWSTR = 31, + IL2CPP_VT_RECORD = 36, + IL2CPP_VT_INT_PTR = 37, + IL2CPP_VT_UINT_PTR = 38, + IL2CPP_VT_FILETIME = 64, + IL2CPP_VT_BLOB = 65, + IL2CPP_VT_STREAM = 66, + IL2CPP_VT_STORAGE = 67, + IL2CPP_VT_STREAMED_OBJECT = 68, + IL2CPP_VT_STORED_OBJECT = 69, + IL2CPP_VT_BLOB_OBJECT = 70, + IL2CPP_VT_CF = 71, + IL2CPP_VT_CLSID = 72, + IL2CPP_VT_VERSIONED_STREAM = 73, + IL2CPP_VT_BSTR_BLOB = 0xfff, + IL2CPP_VT_VECTOR = 0x1000, + IL2CPP_VT_ARRAY = 0x2000, + IL2CPP_VT_BYREF = 0x4000, + IL2CPP_VT_RESERVED = 0x8000, + IL2CPP_VT_ILLEGAL = 0xffff, + IL2CPP_VT_ILLEGALMASKED = 0xfff, + IL2CPP_VT_TYPEMASK = 0xfff, +} Il2CppVarType; +typedef struct Il2CppVariant Il2CppVariant; +typedef struct Il2CppIUnknown Il2CppIUnknown; +typedef struct Il2CppVariant +{ + union + { + struct __tagVARIANT + { + uint16_t type; + uint16_t reserved1; + uint16_t reserved2; + uint16_t reserved3; + union + { + int64_t llVal; + int32_t lVal; + uint8_t bVal; + int16_t iVal; + float fltVal; + double dblVal; + IL2CPP_VARIANT_BOOL boolVal; + int32_t scode; + int64_t cyVal; + double date; + Il2CppChar* bstrVal; + Il2CppIUnknown* punkVal; + void* pdispVal; + Il2CppSafeArray* parray; + uint8_t* pbVal; + int16_t* piVal; + int32_t* plVal; + int64_t* pllVal; + float* pfltVal; + double* pdblVal; + IL2CPP_VARIANT_BOOL* pboolVal; + int32_t* pscode; + int64_t* pcyVal; + double* pdate; + Il2CppChar* pbstrVal; + Il2CppIUnknown** ppunkVal; + void** ppdispVal; + Il2CppSafeArray** pparray; + Il2CppVariant* pvarVal; + void* byref; + char cVal; + uint16_t uiVal; + uint32_t ulVal; + uint64_t ullVal; + int intVal; + unsigned int uintVal; + Il2CppWin32Decimal* pdecVal; + char* pcVal; + uint16_t* puiVal; + uint32_t* pulVal; + uint64_t* pullVal; + int* pintVal; + unsigned int* puintVal; + struct __tagBRECORD + { + void* pvRecord; + void* pRecInfo; + } n4; + } n3; + } n2; + 
Il2CppWin32Decimal decVal; + } n1; +} Il2CppVariant; +typedef struct Il2CppFileTime +{ + uint32_t low; + uint32_t high; +} Il2CppFileTime; +typedef struct Il2CppStatStg +{ + Il2CppChar* name; + uint32_t type; + uint64_t size; + Il2CppFileTime mtime; + Il2CppFileTime ctime; + Il2CppFileTime atime; + uint32_t mode; + uint32_t locks; + Il2CppGuid clsid; + uint32_t state; + uint32_t reserved; +} Il2CppStatStg; +typedef enum Il2CppWindowsRuntimeTypeKind +{ + kTypeKindPrimitive = 0, + kTypeKindMetadata, + kTypeKindCustom +} Il2CppWindowsRuntimeTypeKind; +typedef struct Il2CppWindowsRuntimeTypeName +{ + Il2CppHString typeName; + enum Il2CppWindowsRuntimeTypeKind typeKind; +} Il2CppWindowsRuntimeTypeName; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct Il2CppCodeGenModule Il2CppCodeGenModule; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* 
method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *attribute_class; + Il2CppClass *customattribute_data_class; + Il2CppClass 
*version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ireferencearray_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; + Il2CppClass* system_guid_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + uint32_t 
token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + TypeIndex typeIndex; + int32_t nameIndex; + int32_t scopeIndex; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodExecutionContextInfoIndex +{ + int32_t startIndex; + int32_t count; +} Il2CppMethodExecutionContextInfoIndex; +typedef struct Il2CppMethodScope +{ + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int32_t codeSize; + int32_t startScope; + int32_t numScopes; +} Il2CppMethodHeaderInfo; +typedef struct Il2CppSequencePointSourceFile +{ + const char *file; + uint8_t hash[16]; +} Il2CppSequencePointSourceFile; +typedef struct Il2CppTypeSourceFilePair +{ + TypeDefinitionIndex klassIndex; + int32_t sourceFileIndex; +} Il2CppTypeSourceFilePair; +typedef struct Il2CppSequencePoint +{ + MethodIndex methodDefinitionIndex; + TypeIndex catchTypeIndex; + int32_t sourceFileIndex; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + uint8_t isActive; + int32_t id; + uint8_t tryDepth; +} Il2CppSequencePoint; +typedef struct Il2CppDebuggerMetadataRegistration +{ + Il2CppMethodExecutionContextInfo* methodExecutionContextInfos; + Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes; + Il2CppMethodScope* methodScopes; + Il2CppMethodHeaderInfo* methodHeaderInfos; + Il2CppSequencePointSourceFile* sequencePointSourceFiles; + int32_t numSequencePoints; + 
Il2CppSequencePoint* sequencePoints; + int32_t numTypeSourceFileEntries; + Il2CppTypeSourceFilePair* typeSourceFiles; + const char** methodExecutionContextInfoStrings; +} Il2CppDebuggerMetadataRegistration; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + void *unity_user_data; + uint32_t initializationExceptionGCHandle; + uint32_t cctor_started; 
+ uint32_t cctor_finished; + __attribute__((aligned(8))) size_t cctor_thread; + GenericContainerIndex genericContainerIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t naturalAligment; + uint8_t packingSize; + uint8_t initialized_and_no_error : 1; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + uint8_t has_initialization_error : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + 
const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + const Il2CppCodeGenModule* codeGenModule; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppTokenIndexPair +{ + uint32_t token; + int32_t index; +} Il2CppTokenIndexPair; +typedef struct Il2CppTokenRangePair +{ + uint32_t token; + Il2CppRange range; +} Il2CppTokenRangePair; +typedef struct Il2CppCodeGenModule +{ + const char* moduleName; + const uint32_t methodPointerCount; + const Il2CppMethodPointer* methodPointers; + const int32_t* invokerIndices; + const uint32_t reversePInvokeWrapperCount; + const Il2CppTokenIndexPair* reversePInvokeWrapperIndices; + const uint32_t rgctxRangesCount; + const Il2CppTokenRangePair* rgctxRanges; + const uint32_t rgctxsCount; + const Il2CppRGCTXDefinition* rgctxs; + const Il2CppDebuggerMetadataRegistration *debuggerMetadata; +} Il2CppCodeGenModule; +typedef struct Il2CppCodeRegistration +{ + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t 
interopDataCount; + Il2CppInteropData* interopData; + uint32_t codeGenModulesCount; + const Il2CppCodeGenModule** codeGenModules; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + uint8_t enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t 
gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-api-functions.h new file mode 100644 index 00000000..86581492 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-api-functions.h @@ -0,0 +1,283 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + +DO_API(int, il2cpp_init, (const char* domain_name)); +DO_API(int, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, 
(Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * 
klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * 
klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * 
value)); + +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(bool, il2cpp_gc_is_disabled, ()); +DO_API(int64_t, il2cpp_gc_get_max_time_slice_ns, ()); +DO_API(void, il2cpp_gc_set_max_time_slice_ns, (int64_t maxTimeSlice)); +DO_API(bool, il2cpp_gc_is_incremental, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); +DO_API(void, il2cpp_gc_wbarrier_set_field, (Il2CppObject * obj, void **targetAddress, void *object)); +DO_API(bool, il2cpp_gc_has_strict_wbarriers, ()); +DO_API(void, il2cpp_gc_set_external_allocation_tracker, (void(*func)(void*, size_t, int))); +DO_API(void, il2cpp_gc_set_external_wbarrier_tracker, (void(*func)(void**))); + +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, il2cpp_method_get_from_reflection, (const 
Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); +DO_API(void, il2cpp_profiler_install_thread, (Il2CppProfileThreadFunc start, Il2CppProfileThreadFunc end)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, 
il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); 
+DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, il2cpp_type_get_assembly_qualified_name, (const 
Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +DO_API(size_t, il2cpp_image_get_class_count, (const Il2CppImage * image)); +DO_API(const Il2CppClass*, il2cpp_image_get_class, (const Il2CppImage * image, size_t index)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); + +// custom attributes +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_class, (Il2CppClass * klass)); +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_method, (const MethodInfo * method)); + +DO_API(Il2CppObject*, il2cpp_custom_attrs_get_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(bool, il2cpp_custom_attrs_has_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(Il2CppArray*, il2cpp_custom_attrs_construct, (Il2CppCustomAttrInfo * cinfo)); + +DO_API(void, il2cpp_custom_attrs_free, (Il2CppCustomAttrInfo * ainfo)); + +// Il2CppClass user data for GetComponent optimization +DO_API(void, il2cpp_class_set_userdata, (Il2CppClass * klass, void* userdata)); +DO_API(int, il2cpp_class_get_userdata_offset, ()); diff --git a/module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-class.h 
b/module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-class.h new file mode 100644 index 00000000..525cdb2b --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.2.0f1/il2cpp-class.h @@ -0,0 +1,1356 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef struct Il2CppCustomAttrInfo Il2CppCustomAttrInfo; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + 
IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, 
Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef void (*Il2CppProfileThreadFunc) (Il2CppProfiler *prof, unsigned long tid); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef uintptr_t il2cpp_array_size_t; +typedef void ( *SynchronizationContextCallback)(intptr_t arg); +typedef uint32_t Il2CppMethodSlot; +static const uint32_t kInvalidIl2CppMethodSlot = 65535; +static const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef enum +{ + 
IL2CPP_TOKEN_MODULE = 0x00000000, + IL2CPP_TOKEN_TYPE_REF = 0x01000000, + IL2CPP_TOKEN_TYPE_DEF = 0x02000000, + IL2CPP_TOKEN_FIELD_DEF = 0x04000000, + IL2CPP_TOKEN_METHOD_DEF = 0x06000000, + IL2CPP_TOKEN_PARAM_DEF = 0x08000000, + IL2CPP_TOKEN_INTERFACE_IMPL = 0x09000000, + IL2CPP_TOKEN_MEMBER_REF = 0x0a000000, + IL2CPP_TOKEN_CUSTOM_ATTRIBUTE = 0x0c000000, + IL2CPP_TOKEN_PERMISSION = 0x0e000000, + IL2CPP_TOKEN_SIGNATURE = 0x11000000, + IL2CPP_TOKEN_EVENT = 0x14000000, + IL2CPP_TOKEN_PROPERTY = 0x17000000, + IL2CPP_TOKEN_MODULE_REF = 0x1a000000, + IL2CPP_TOKEN_TYPE_SPEC = 0x1b000000, + IL2CPP_TOKEN_ASSEMBLY = 0x20000000, + IL2CPP_TOKEN_ASSEMBLY_REF = 0x23000000, + IL2CPP_TOKEN_FILE = 0x26000000, + IL2CPP_TOKEN_EXPORTED_TYPE = 0x27000000, + IL2CPP_TOKEN_MANIFEST_RESOURCE = 0x28000000, + IL2CPP_TOKEN_GENERIC_PARAM = 0x2a000000, + IL2CPP_TOKEN_METHOD_SPEC = 0x2b000000, +} Il2CppTokenType; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +static const TypeIndex kTypeIndexInvalid = -1; +static const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +static const DefaultValueDataIndex kDefaultValueIndexNull = -1; +static const CustomAttributeIndex 
kCustomAttributeIndexInvalid = -1; +static const EventIndex kEventIndexInvalid = -1; +static const FieldIndex kFieldIndexInvalid = -1; +static const MethodIndex kMethodIndexInvalid = -1; +static const PropertyIndex kPropertyIndexInvalid = -1; +static const GenericContainerIndex kGenericContainerIndexInvalid = -1; +static const GenericParameterIndex kGenericParameterIndexInvalid = -1; +static const RGCTXIndex kRGCTXIndexInvalid = -1; +static const StringLiteralIndex kStringLiteralIndexInvalid = -1; +static const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex 
methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + GenericContainerIndex genericContainerIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + uint32_t token; 
+} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +static const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; +} Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + uint32_t token; + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange 
+{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t 
attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass 
*generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE, + CHARSET_NOT_SPECIFIED +} Il2CppCharSet; +typedef struct Il2CppHString__ +{ + int unused; +} Il2CppHString__; +typedef Il2CppHString__* Il2CppHString; +typedef struct Il2CppHStringHeader +{ + union + { + void* Reserved1; + char Reserved2[24]; + } Reserved; +} Il2CppHStringHeader; +typedef struct Il2CppGuid +{ + uint32_t data1; + uint16_t data2; + uint16_t data3; + uint8_t data4[8]; +} Il2CppGuid; +typedef struct Il2CppSafeArrayBound +{ + uint32_t element_count; + int32_t lower_bound; +} Il2CppSafeArrayBound; +typedef struct Il2CppSafeArray +{ + uint16_t dimension_count; + uint16_t features; + uint32_t element_size; + uint32_t lock_count; + void* data; + Il2CppSafeArrayBound bounds[1]; +} Il2CppSafeArray; +typedef struct Il2CppWin32Decimal +{ + uint16_t reserved; + union + { + struct + { + uint8_t scale; + uint8_t sign; + } s; + uint16_t signscale; + } u; + uint32_t hi32; + union + { + struct + { + uint32_t lo32; + uint32_t mid32; + } s2; + uint64_t lo64; + } u2; +} Il2CppWin32Decimal; +typedef int16_t IL2CPP_VARIANT_BOOL; +typedef enum Il2CppVarType +{ + IL2CPP_VT_EMPTY = 0, + IL2CPP_VT_NULL = 1, + IL2CPP_VT_I2 = 2, + IL2CPP_VT_I4 = 3, + IL2CPP_VT_R4 = 4, + IL2CPP_VT_R8 = 5, + IL2CPP_VT_CY = 6, + IL2CPP_VT_DATE = 7, + IL2CPP_VT_BSTR = 8, + IL2CPP_VT_DISPATCH = 9, + IL2CPP_VT_ERROR = 10, + IL2CPP_VT_BOOL = 11, + IL2CPP_VT_VARIANT = 12, + IL2CPP_VT_UNKNOWN = 13, + IL2CPP_VT_DECIMAL = 14, + IL2CPP_VT_I1 = 16, + IL2CPP_VT_UI1 = 17, + IL2CPP_VT_UI2 = 18, + IL2CPP_VT_UI4 = 19, + IL2CPP_VT_I8 = 20, + IL2CPP_VT_UI8 = 21, + IL2CPP_VT_INT = 22, 
+ IL2CPP_VT_UINT = 23, + IL2CPP_VT_VOID = 24, + IL2CPP_VT_HRESULT = 25, + IL2CPP_VT_PTR = 26, + IL2CPP_VT_SAFEARRAY = 27, + IL2CPP_VT_CARRAY = 28, + IL2CPP_VT_USERDEFINED = 29, + IL2CPP_VT_LPSTR = 30, + IL2CPP_VT_LPWSTR = 31, + IL2CPP_VT_RECORD = 36, + IL2CPP_VT_INT_PTR = 37, + IL2CPP_VT_UINT_PTR = 38, + IL2CPP_VT_FILETIME = 64, + IL2CPP_VT_BLOB = 65, + IL2CPP_VT_STREAM = 66, + IL2CPP_VT_STORAGE = 67, + IL2CPP_VT_STREAMED_OBJECT = 68, + IL2CPP_VT_STORED_OBJECT = 69, + IL2CPP_VT_BLOB_OBJECT = 70, + IL2CPP_VT_CF = 71, + IL2CPP_VT_CLSID = 72, + IL2CPP_VT_VERSIONED_STREAM = 73, + IL2CPP_VT_BSTR_BLOB = 0xfff, + IL2CPP_VT_VECTOR = 0x1000, + IL2CPP_VT_ARRAY = 0x2000, + IL2CPP_VT_BYREF = 0x4000, + IL2CPP_VT_RESERVED = 0x8000, + IL2CPP_VT_ILLEGAL = 0xffff, + IL2CPP_VT_ILLEGALMASKED = 0xfff, + IL2CPP_VT_TYPEMASK = 0xfff, +} Il2CppVarType; +typedef struct Il2CppVariant Il2CppVariant; +typedef struct Il2CppIUnknown Il2CppIUnknown; +typedef struct Il2CppVariant +{ + union + { + struct __tagVARIANT + { + uint16_t type; + uint16_t reserved1; + uint16_t reserved2; + uint16_t reserved3; + union + { + int64_t llVal; + int32_t lVal; + uint8_t bVal; + int16_t iVal; + float fltVal; + double dblVal; + IL2CPP_VARIANT_BOOL boolVal; + int32_t scode; + int64_t cyVal; + double date; + Il2CppChar* bstrVal; + Il2CppIUnknown* punkVal; + void* pdispVal; + Il2CppSafeArray* parray; + uint8_t* pbVal; + int16_t* piVal; + int32_t* plVal; + int64_t* pllVal; + float* pfltVal; + double* pdblVal; + IL2CPP_VARIANT_BOOL* pboolVal; + int32_t* pscode; + int64_t* pcyVal; + double* pdate; + Il2CppChar* pbstrVal; + Il2CppIUnknown** ppunkVal; + void** ppdispVal; + Il2CppSafeArray** pparray; + Il2CppVariant* pvarVal; + void* byref; + char cVal; + uint16_t uiVal; + uint32_t ulVal; + uint64_t ullVal; + int intVal; + unsigned int uintVal; + Il2CppWin32Decimal* pdecVal; + char* pcVal; + uint16_t* puiVal; + uint32_t* pulVal; + uint64_t* pullVal; + int* pintVal; + unsigned int* puintVal; + struct __tagBRECORD + { + 
void* pvRecord; + void* pRecInfo; + } n4; + } n3; + } n2; + Il2CppWin32Decimal decVal; + } n1; +} Il2CppVariant; +typedef struct Il2CppFileTime +{ + uint32_t low; + uint32_t high; +} Il2CppFileTime; +typedef struct Il2CppStatStg +{ + Il2CppChar* name; + uint32_t type; + uint64_t size; + Il2CppFileTime mtime; + Il2CppFileTime ctime; + Il2CppFileTime atime; + uint32_t mode; + uint32_t locks; + Il2CppGuid clsid; + uint32_t state; + uint32_t reserved; +} Il2CppStatStg; +typedef enum Il2CppWindowsRuntimeTypeKind +{ + kTypeKindPrimitive = 0, + kTypeKindMetadata, + kTypeKindCustom +} Il2CppWindowsRuntimeTypeKind; +typedef struct Il2CppWindowsRuntimeTypeName +{ + Il2CppHString typeName; + enum Il2CppWindowsRuntimeTypeKind typeKind; +} Il2CppWindowsRuntimeTypeName; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct Il2CppCodeGenModule Il2CppCodeGenModule; +typedef struct VirtualInvokeData 
+{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *attribute_class; + 
Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ireferencearray_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; + Il2CppClass* system_guid_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const 
MethodInfo* remove; + const MethodInfo* raise; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + TypeIndex typeIndex; + int32_t nameIndex; + int32_t scopeIndex; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodExecutionContextInfoIndex +{ + int32_t startIndex; + int32_t count; +} Il2CppMethodExecutionContextInfoIndex; +typedef struct Il2CppMethodScope +{ + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int32_t code_size; + int32_t startScope; + int32_t numScopes; +} Il2CppMethodHeaderInfo; +typedef struct Il2CppSequencePointSourceFile +{ + const char *file; + uint8_t hash[16]; +} Il2CppSequencePointSourceFile; +typedef struct Il2CppTypeSourceFilePair +{ + TypeDefinitionIndex klassIndex; + int32_t sourceFileIndex; +} Il2CppTypeSourceFilePair; +typedef struct Il2CppSequencePoint +{ + MethodIndex methodDefinitionIndex; + TypeIndex catchTypeIndex; + int32_t sourceFileIndex; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + int32_t isActive; + int32_t id; + uint8_t tryDepth; +} Il2CppSequencePoint; +typedef struct Il2CppDebuggerMetadataRegistration +{ + Il2CppMethodExecutionContextInfo* methodExecutionContextInfos; + Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes; + Il2CppMethodScope* methodScopes; + Il2CppMethodHeaderInfo* methodHeaderInfos; + Il2CppSequencePointSourceFile* 
sequencePointSourceFiles; + int32_t numSequencePoints; + Il2CppSequencePoint* sequencePoints; + int32_t numTypeSourceFileEntries; + Il2CppTypeSourceFilePair* typeSourceFiles; + const char** methodExecutionContextInfoStrings; +} Il2CppDebuggerMetadataRegistration; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + void *unity_user_data; + uint32_t 
initializationExceptionGCHandle; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) size_t cctor_thread; + GenericContainerIndex genericContainerIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t naturalAligment; + uint8_t packingSize; + uint8_t initialized_and_no_error : 1; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + uint8_t has_initialization_error : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} 
Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + const Il2CppCodeGenModule* codeGenModule; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppTokenIndexPair +{ + uint32_t token; + int32_t index; +} Il2CppTokenIndexPair; +typedef struct Il2CppTokenRangePair +{ + uint32_t token; + Il2CppRange range; +} Il2CppTokenRangePair; +typedef struct Il2CppCodeGenModule +{ + const char* moduleName; + const uint32_t methodPointerCount; + const Il2CppMethodPointer* methodPointers; + const int32_t* invokerIndices; + const uint32_t reversePInvokeWrapperCount; + const Il2CppTokenIndexPair* reversePInvokeWrapperIndices; + const uint32_t rgctxRangesCount; + const Il2CppTokenRangePair* rgctxRanges; + const uint32_t rgctxsCount; + const Il2CppRGCTXDefinition* rgctxs; + const Il2CppDebuggerMetadataRegistration *debuggerMetadata; +} Il2CppCodeGenModule; +typedef struct Il2CppCodeRegistration +{ + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; 
+ const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; + uint32_t codeGenModulesCount; + const Il2CppCodeGenModule** codeGenModules; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t 
loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-api-functions.h new file mode 100644 index 00000000..43293e37 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-api-functions.h @@ -0,0 +1,304 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + +DO_API(int, il2cpp_init, (const char* domain_name)); +DO_API(int, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, 
(Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(void, il2cpp_class_for_each, (void(*klassReportFunc)(Il2CppClass* klass, void* userData), void* userData)); +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, 
il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(void, il2cpp_type_get_name_chunked, (const Il2CppType * type, void(*chunkReportFunc)(void* data, void* userData), void* userData)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); 
+DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_data_size, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_class_get_static_field_data, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * 
obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); +DO_API(bool, il2cpp_field_is_literal, (FieldInfo * field)); +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(bool, il2cpp_gc_is_disabled, ()); +DO_API(int64_t, il2cpp_gc_get_max_time_slice_ns, ()); +DO_API(void, il2cpp_gc_set_max_time_slice_ns, (int64_t maxTimeSlice)); +DO_API(bool, il2cpp_gc_is_incremental, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); +DO_API(void, il2cpp_gc_wbarrier_set_field, (Il2CppObject * obj, void **targetAddress, void *object)); +DO_API(bool, il2cpp_gc_has_strict_wbarriers, ()); +DO_API(void, il2cpp_gc_set_external_allocation_tracker, (void(*func)(void*, size_t, int))); +DO_API(void, il2cpp_gc_set_external_wbarrier_tracker, (void(*func)(void**))); +DO_API(void, il2cpp_gc_foreach_heap, (void(*func)(void* data, void* userData), void* userData)); +DO_API(void, il2cpp_stop_gc_world, ()); +DO_API(void, il2cpp_start_gc_world, ()); +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); +DO_API(void , il2cpp_gchandle_foreach_get_target, (void(*func)(void* data, void* userData), void* userData)); + +// vm runtime info +DO_API(uint32_t, 
il2cpp_object_header_size, ()); +DO_API(uint32_t, il2cpp_array_object_header_size, ()); +DO_API(uint32_t, il2cpp_offset_of_array_length_in_array_object_header, ()); +DO_API(uint32_t, il2cpp_offset_of_array_bounds_in_array_object_header, ()); +DO_API(uint32_t, il2cpp_allocation_granularity, ()); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, il2cpp_method_get_from_reflection, (const Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo 
* method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); +DO_API(void, il2cpp_profiler_install_thread, (Il2CppProfileThreadFunc start, Il2CppProfileThreadFunc end)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); 
+DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* 
user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); +DO_API(void, il2cpp_override_stack_backtrace, (Il2CppBacktraceFunc stackBacktraceFunc)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, il2cpp_type_get_assembly_qualified_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_static, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_pointer_type, (const Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +DO_API(size_t, il2cpp_image_get_class_count, (const Il2CppImage * image)); +DO_API(const Il2CppClass*, il2cpp_image_get_class, (const Il2CppImage * image, size_t index)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); 
+DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); +DO_API(void, il2cpp_register_debugger_agent_transport, (Il2CppDebuggerTransport * debuggerTransport)); + +// Debug metadata +DO_API(bool, il2cpp_debug_get_method_info, (const MethodInfo*, Il2CppMethodDebugInfo * methodDebugInfo)); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); + +// custom attributes +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_class, (Il2CppClass * klass)); +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_method, (const MethodInfo * method)); + +DO_API(Il2CppObject*, il2cpp_custom_attrs_get_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(bool, il2cpp_custom_attrs_has_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(Il2CppArray*, il2cpp_custom_attrs_construct, (Il2CppCustomAttrInfo * cinfo)); + +DO_API(void, il2cpp_custom_attrs_free, (Il2CppCustomAttrInfo * ainfo)); + +// Il2CppClass user data for GetComponent optimization +DO_API(void, il2cpp_class_set_userdata, (Il2CppClass * klass, void* userdata)); +DO_API(int, il2cpp_class_get_userdata_offset, ()); diff --git a/module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-class.h new file mode 100644 index 00000000..f236656c --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.3.0f6/il2cpp-class.h @@ -0,0 +1,1405 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef 
struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef struct Il2CppCustomAttrInfo Il2CppCustomAttrInfo; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + 
IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef void(*Il2CppMethodPointer)(); +typedef struct Il2CppMethodDebugInfo +{ + Il2CppMethodPointer methodPointer; + int32_t code_size; + const char *file; +} Il2CppMethodDebugInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef struct +{ + const char *name; + void(*connect)(const char *address); + int(*wait_for_attach)(void); + void(*close1)(void); + void(*close2)(void); + int(*send)(void *buf, int len); + int(*recv)(void *buf, int len); +} Il2CppDebuggerTransport; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void 
(*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef void (*Il2CppProfileThreadFunc) (Il2CppProfiler *prof, unsigned long tid); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef size_t(*Il2CppBacktraceFunc) (Il2CppMethodPointer* buffer, size_t maxSize); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef uintptr_t il2cpp_array_size_t; +typedef void ( *SynchronizationContextCallback)(intptr_t arg); +typedef uint32_t Il2CppMethodSlot; +static const uint32_t kInvalidIl2CppMethodSlot = 65535; +static const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 
0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef enum +{ + IL2CPP_TOKEN_MODULE = 0x00000000, + IL2CPP_TOKEN_TYPE_REF = 0x01000000, + IL2CPP_TOKEN_TYPE_DEF = 0x02000000, + IL2CPP_TOKEN_FIELD_DEF = 0x04000000, + IL2CPP_TOKEN_METHOD_DEF = 0x06000000, + IL2CPP_TOKEN_PARAM_DEF = 0x08000000, + IL2CPP_TOKEN_INTERFACE_IMPL = 0x09000000, + IL2CPP_TOKEN_MEMBER_REF = 0x0a000000, + IL2CPP_TOKEN_CUSTOM_ATTRIBUTE = 0x0c000000, + IL2CPP_TOKEN_PERMISSION = 0x0e000000, + IL2CPP_TOKEN_SIGNATURE = 0x11000000, + IL2CPP_TOKEN_EVENT = 0x14000000, + IL2CPP_TOKEN_PROPERTY = 0x17000000, + IL2CPP_TOKEN_MODULE_REF = 0x1a000000, + IL2CPP_TOKEN_TYPE_SPEC = 0x1b000000, + IL2CPP_TOKEN_ASSEMBLY = 0x20000000, + IL2CPP_TOKEN_ASSEMBLY_REF = 0x23000000, + IL2CPP_TOKEN_FILE = 0x26000000, + IL2CPP_TOKEN_EXPORTED_TYPE = 0x27000000, + IL2CPP_TOKEN_MANIFEST_RESOURCE = 0x28000000, + IL2CPP_TOKEN_GENERIC_PARAM = 0x2a000000, + IL2CPP_TOKEN_METHOD_SPEC = 0x2b000000, +} Il2CppTokenType; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +static const TypeIndex kTypeIndexInvalid = -1; +static const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +static const 
DefaultValueDataIndex kDefaultValueIndexNull = -1; +static const CustomAttributeIndex kCustomAttributeIndexInvalid = -1; +static const EventIndex kEventIndexInvalid = -1; +static const FieldIndex kFieldIndexInvalid = -1; +static const MethodIndex kMethodIndexInvalid = -1; +static const PropertyIndex kPropertyIndexInvalid = -1; +static const GenericContainerIndex kGenericContainerIndexInvalid = -1; +static const GenericParameterIndex kGenericParameterIndexInvalid = -1; +static const RGCTXIndex kRGCTXIndexInvalid = -1; +static const StringLiteralIndex kStringLiteralIndexInvalid = -1; +static const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + 
GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + GenericContainerIndex genericContainerIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition 
+{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +static const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; +} Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + uint32_t 
token; + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t 
referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType 
*type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE, + CHARSET_NOT_SPECIFIED +} Il2CppCharSet; +typedef struct Il2CppHString__ +{ + int unused; +} Il2CppHString__; +typedef Il2CppHString__* Il2CppHString; +typedef struct Il2CppHStringHeader +{ + union + { + void* Reserved1; + char Reserved2[24]; + } Reserved; +} Il2CppHStringHeader; +typedef struct Il2CppGuid +{ + uint32_t data1; + uint16_t data2; + uint16_t data3; + uint8_t data4[8]; +} Il2CppGuid; +typedef struct Il2CppSafeArrayBound +{ + uint32_t element_count; + int32_t lower_bound; +} Il2CppSafeArrayBound; +typedef struct Il2CppSafeArray +{ + uint16_t dimension_count; + uint16_t features; + uint32_t element_size; + uint32_t lock_count; + void* data; + Il2CppSafeArrayBound bounds[1]; +} Il2CppSafeArray; +typedef struct Il2CppWin32Decimal +{ + uint16_t reserved; + union + { + struct + { + uint8_t scale; + uint8_t sign; + } s; + uint16_t signscale; + } u; + uint32_t hi32; + union + { + struct + { + uint32_t lo32; + uint32_t mid32; + } s2; + uint64_t lo64; + } u2; +} Il2CppWin32Decimal; +typedef int16_t IL2CPP_VARIANT_BOOL; +typedef enum Il2CppVarType +{ + IL2CPP_VT_EMPTY = 0, + IL2CPP_VT_NULL = 1, + IL2CPP_VT_I2 = 2, + IL2CPP_VT_I4 = 3, + IL2CPP_VT_R4 = 4, + IL2CPP_VT_R8 = 5, + IL2CPP_VT_CY = 6, + IL2CPP_VT_DATE = 7, + IL2CPP_VT_BSTR = 8, + IL2CPP_VT_DISPATCH = 9, + IL2CPP_VT_ERROR = 10, + IL2CPP_VT_BOOL = 11, + IL2CPP_VT_VARIANT = 12, + IL2CPP_VT_UNKNOWN = 13, + IL2CPP_VT_DECIMAL = 14, + IL2CPP_VT_I1 = 16, + IL2CPP_VT_UI1 = 17, + 
IL2CPP_VT_UI2 = 18, + IL2CPP_VT_UI4 = 19, + IL2CPP_VT_I8 = 20, + IL2CPP_VT_UI8 = 21, + IL2CPP_VT_INT = 22, + IL2CPP_VT_UINT = 23, + IL2CPP_VT_VOID = 24, + IL2CPP_VT_HRESULT = 25, + IL2CPP_VT_PTR = 26, + IL2CPP_VT_SAFEARRAY = 27, + IL2CPP_VT_CARRAY = 28, + IL2CPP_VT_USERDEFINED = 29, + IL2CPP_VT_LPSTR = 30, + IL2CPP_VT_LPWSTR = 31, + IL2CPP_VT_RECORD = 36, + IL2CPP_VT_INT_PTR = 37, + IL2CPP_VT_UINT_PTR = 38, + IL2CPP_VT_FILETIME = 64, + IL2CPP_VT_BLOB = 65, + IL2CPP_VT_STREAM = 66, + IL2CPP_VT_STORAGE = 67, + IL2CPP_VT_STREAMED_OBJECT = 68, + IL2CPP_VT_STORED_OBJECT = 69, + IL2CPP_VT_BLOB_OBJECT = 70, + IL2CPP_VT_CF = 71, + IL2CPP_VT_CLSID = 72, + IL2CPP_VT_VERSIONED_STREAM = 73, + IL2CPP_VT_BSTR_BLOB = 0xfff, + IL2CPP_VT_VECTOR = 0x1000, + IL2CPP_VT_ARRAY = 0x2000, + IL2CPP_VT_BYREF = 0x4000, + IL2CPP_VT_RESERVED = 0x8000, + IL2CPP_VT_ILLEGAL = 0xffff, + IL2CPP_VT_ILLEGALMASKED = 0xfff, + IL2CPP_VT_TYPEMASK = 0xfff, +} Il2CppVarType; +typedef struct Il2CppVariant Il2CppVariant; +typedef struct Il2CppIUnknown Il2CppIUnknown; +typedef struct Il2CppVariant +{ + union + { + struct __tagVARIANT + { + uint16_t type; + uint16_t reserved1; + uint16_t reserved2; + uint16_t reserved3; + union + { + int64_t llVal; + int32_t lVal; + uint8_t bVal; + int16_t iVal; + float fltVal; + double dblVal; + IL2CPP_VARIANT_BOOL boolVal; + int32_t scode; + int64_t cyVal; + double date; + Il2CppChar* bstrVal; + Il2CppIUnknown* punkVal; + void* pdispVal; + Il2CppSafeArray* parray; + uint8_t* pbVal; + int16_t* piVal; + int32_t* plVal; + int64_t* pllVal; + float* pfltVal; + double* pdblVal; + IL2CPP_VARIANT_BOOL* pboolVal; + int32_t* pscode; + int64_t* pcyVal; + double* pdate; + Il2CppChar* pbstrVal; + Il2CppIUnknown** ppunkVal; + void** ppdispVal; + Il2CppSafeArray** pparray; + Il2CppVariant* pvarVal; + void* byref; + char cVal; + uint16_t uiVal; + uint32_t ulVal; + uint64_t ullVal; + int intVal; + unsigned int uintVal; + Il2CppWin32Decimal* pdecVal; + char* pcVal; + uint16_t* puiVal; + 
uint32_t* pulVal; + uint64_t* pullVal; + int* pintVal; + unsigned int* puintVal; + struct __tagBRECORD + { + void* pvRecord; + void* pRecInfo; + } n4; + } n3; + } n2; + Il2CppWin32Decimal decVal; + } n1; +} Il2CppVariant; +typedef struct Il2CppFileTime +{ + uint32_t low; + uint32_t high; +} Il2CppFileTime; +typedef struct Il2CppStatStg +{ + Il2CppChar* name; + uint32_t type; + uint64_t size; + Il2CppFileTime mtime; + Il2CppFileTime ctime; + Il2CppFileTime atime; + uint32_t mode; + uint32_t locks; + Il2CppGuid clsid; + uint32_t state; + uint32_t reserved; +} Il2CppStatStg; +typedef enum Il2CppWindowsRuntimeTypeKind +{ + kTypeKindPrimitive = 0, + kTypeKindMetadata, + kTypeKindCustom +} Il2CppWindowsRuntimeTypeKind; +typedef struct Il2CppWindowsRuntimeTypeName +{ + Il2CppHString typeName; + enum Il2CppWindowsRuntimeTypeKind typeKind; +} Il2CppWindowsRuntimeTypeName; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable 
Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct Il2CppCodeGenModule Il2CppCodeGenModule; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *internal_thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass 
*generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_ireadonlylist_class; + Il2CppClass *generic_ireadonlycollection_class; + Il2CppClass *runtimetype_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *attribute_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *mono_assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *mono_parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass *threadpool_wait_callback_class; + MethodInfo *threadpool_perform_wait_callback_method; + Il2CppClass *mono_method_message_class; + Il2CppClass* ireference_class; + Il2CppClass* ireferencearray_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; + Il2CppClass* system_guid_class; + Il2CppClass* sbyte_shared_enum; + Il2CppClass* int16_shared_enum; + Il2CppClass* int32_shared_enum; + Il2CppClass* int64_shared_enum; + Il2CppClass* byte_shared_enum; + Il2CppClass* uint16_shared_enum; + Il2CppClass* uint32_shared_enum; + Il2CppClass* uint64_shared_enum; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo 
MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + TypeIndex typeIndex; + int32_t nameIndex; + int32_t scopeIndex; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodExecutionContextInfoIndex +{ + int32_t startIndex; + int32_t count; +} Il2CppMethodExecutionContextInfoIndex; +typedef struct Il2CppMethodScope +{ + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int32_t code_size; + int32_t startScope; + int32_t numScopes; +} Il2CppMethodHeaderInfo; +typedef struct Il2CppSequencePointSourceFile +{ + const char *file; + uint8_t hash[16]; +} 
Il2CppSequencePointSourceFile; +typedef struct Il2CppTypeSourceFilePair +{ + TypeDefinitionIndex klassIndex; + int32_t sourceFileIndex; +} Il2CppTypeSourceFilePair; +typedef struct Il2CppSequencePoint +{ + MethodIndex methodDefinitionIndex; + int32_t sourceFileIndex; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + int32_t isActive; + int32_t id; +} Il2CppSequencePoint; +typedef struct Il2CppCatchPoint +{ + MethodIndex methodDefinitionIndex; + TypeIndex catchTypeIndex; + int32_t ilOffset; + int8_t tryId; + int8_t parentTryId; +} Il2CppCatchPoint; +typedef struct Il2CppDebuggerMetadataRegistration +{ + Il2CppMethodExecutionContextInfo* methodExecutionContextInfos; + Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes; + Il2CppMethodScope* methodScopes; + Il2CppMethodHeaderInfo* methodHeaderInfos; + Il2CppSequencePointSourceFile* sequencePointSourceFiles; + int32_t numSequencePoints; + Il2CppSequencePoint* sequencePoints; + int32_t numCatchPoints; + Il2CppCatchPoint* catchPoints; + int32_t numTypeSourceFileEntries; + Il2CppTypeSourceFilePair* typeSourceFiles; + const char** methodExecutionContextInfoStrings; +} Il2CppDebuggerMetadataRegistration; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t 
wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + void *unity_user_data; + uint32_t initializationExceptionGCHandle; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) size_t cctor_thread; + GenericContainerIndex genericContainerIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t naturalAligment; + uint8_t packingSize; + uint8_t initialized_and_no_error : 1; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + 
uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + uint8_t has_initialization_error : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + volatile int threadpool_jobs; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + const Il2CppCodeGenModule* codeGenModule; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppTokenIndexPair +{ + uint32_t token; + int32_t index; +} Il2CppTokenIndexPair; +typedef struct Il2CppTokenRangePair +{ + uint32_t token; 
+ Il2CppRange range; +} Il2CppTokenRangePair; +typedef struct Il2CppTokenIndexMethodTuple +{ + uint32_t token; + int32_t index; + void** method; + uint32_t genericMethodIndex; +} Il2CppTokenIndexMethodTuple; +typedef struct Il2CppCodeGenModule +{ + const char* moduleName; + const uint32_t methodPointerCount; + const Il2CppMethodPointer* methodPointers; + const int32_t* invokerIndices; + const uint32_t reversePInvokeWrapperCount; + const Il2CppTokenIndexMethodTuple* reversePInvokeWrapperIndices; + const uint32_t rgctxRangesCount; + const Il2CppTokenRangePair* rgctxRanges; + const uint32_t rgctxsCount; + const Il2CppRGCTXDefinition* rgctxs; + const Il2CppDebuggerMetadataRegistration *debuggerMetadata; +} Il2CppCodeGenModule; +typedef struct Il2CppCodeRegistration +{ + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; + uint32_t codeGenModulesCount; + const Il2CppCodeGenModule** codeGenModules; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** 
typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int 
threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-api-functions.h new file mode 100644 index 00000000..43293e37 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-api-functions.h @@ -0,0 +1,304 @@ +#ifndef DO_API_NO_RETURN +#define DO_API_NO_RETURN(r, n, p) DO_API(r,n,p) +#endif + +DO_API(int, il2cpp_init, (const char* domain_name)); +DO_API(int, il2cpp_init_utf16, (const Il2CppChar * domain_name)); +DO_API(void, il2cpp_shutdown, ()); +DO_API(void, il2cpp_set_config_dir, (const char *config_path)); +DO_API(void, il2cpp_set_data_dir, (const char *data_path)); +DO_API(void, il2cpp_set_temp_dir, (const char *temp_path)); +DO_API(void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir)); +DO_API(void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar * const argv[], const char* basedir)); +DO_API(void, il2cpp_set_config_utf16, (const Il2CppChar * executablePath)); +DO_API(void, il2cpp_set_config, (const char* executablePath)); + +DO_API(void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks * callbacks)); +DO_API(const Il2CppImage*, il2cpp_get_corlib, ()); +DO_API(void, 
il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method)); +DO_API(Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name)); + +DO_API(void*, il2cpp_alloc, (size_t size)); +DO_API(void, il2cpp_free, (void* ptr)); + +// array +DO_API(Il2CppClass*, il2cpp_array_class_get, (Il2CppClass * element_class, uint32_t rank)); +DO_API(uint32_t, il2cpp_array_length, (Il2CppArray * array)); +DO_API(uint32_t, il2cpp_array_get_byte_length, (Il2CppArray * array)); +DO_API(Il2CppArray*, il2cpp_array_new, (Il2CppClass * elementTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass * arrayTypeInfo, il2cpp_array_size_t length)); +DO_API(Il2CppArray*, il2cpp_array_new_full, (Il2CppClass * array_class, il2cpp_array_size_t * lengths, il2cpp_array_size_t * lower_bounds)); +DO_API(Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass * element_class, uint32_t rank, bool bounded)); +DO_API(int, il2cpp_array_element_size, (const Il2CppClass * array_class)); + +// assembly +DO_API(const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly * assembly)); + +// class +DO_API(void, il2cpp_class_for_each, (void(*klassReportFunc)(Il2CppClass* klass, void* userData), void* userData)); +DO_API(const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_generic, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_inflated, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_assignable_from, (Il2CppClass * klass, Il2CppClass * oklass)); +DO_API(bool, il2cpp_class_is_subclass_of, (Il2CppClass * klass, Il2CppClass * klassc, bool check_interfaces)); +DO_API(bool, il2cpp_class_has_parent, (Il2CppClass * klass, Il2CppClass * klassc)); +DO_API(Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage * image, const char* namespaze, const char *name)); +DO_API(Il2CppClass*, 
il2cpp_class_from_system_type, (Il2CppReflectionType * type)); +DO_API(Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass * klass)); +DO_API(const EventInfo*, il2cpp_class_get_events, (Il2CppClass * klass, void* *iter)); +DO_API(FieldInfo*, il2cpp_class_get_fields, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass * klass, void* *iter)); +DO_API(Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass * klass, void* *iter)); +DO_API(const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass * klass, const char *name)); +DO_API(FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass * klass, const char *name)); +DO_API(const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass * klass, void* *iter)); +DO_API(const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass * klass, const char* name, int argsCount)); +DO_API(const char*, il2cpp_class_get_name, (Il2CppClass * klass)); +DO_API(void, il2cpp_type_get_name_chunked, (const Il2CppType * type, void(*chunkReportFunc)(void* data, void* userData), void* userData)); +DO_API(const char*, il2cpp_class_get_namespace, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_instance_size, (Il2CppClass * klass)); +DO_API(size_t, il2cpp_class_num_fields, (const Il2CppClass * enumKlass)); +DO_API(bool, il2cpp_class_is_valuetype, (const Il2CppClass * klass)); +DO_API(int32_t, il2cpp_class_value_size, (Il2CppClass * klass, uint32_t * align)); +DO_API(bool, il2cpp_class_is_blittable, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_flags, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_abstract, (const Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_interface, (const Il2CppClass * 
klass)); +DO_API(int, il2cpp_class_array_element_size, (const Il2CppClass * klass)); +DO_API(Il2CppClass*, il2cpp_class_from_type, (const Il2CppType * type)); +DO_API(const Il2CppType*, il2cpp_class_get_type, (Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_type_token, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_has_attribute, (Il2CppClass * klass, Il2CppClass * attr_class)); +DO_API(bool, il2cpp_class_has_references, (Il2CppClass * klass)); +DO_API(bool, il2cpp_class_is_enum, (const Il2CppClass * klass)); +DO_API(const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass * klass)); +DO_API(const char*, il2cpp_class_get_assemblyname, (const Il2CppClass * klass)); +DO_API(int, il2cpp_class_get_rank, (const Il2CppClass * klass)); +DO_API(uint32_t, il2cpp_class_get_data_size, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_class_get_static_field_data, (const Il2CppClass * klass)); + +// testing only +DO_API(size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass * klass)); +DO_API(void, il2cpp_class_get_bitmap, (Il2CppClass * klass, size_t * bitmap)); + +// stats +DO_API(bool, il2cpp_stats_dump_to_file, (const char *path)); +DO_API(uint64_t, il2cpp_stats_get_value, (Il2CppStat stat)); + +// domain +DO_API(Il2CppDomain*, il2cpp_domain_get, ()); +DO_API(const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain * domain, const char* name)); +DO_API(const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain * domain, size_t * size)); + +// exception +DO_API_NO_RETURN(void, il2cpp_raise_exception, (Il2CppException*)); +DO_API(Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage * image, const char *name_space, const char *name, const char *msg)); +DO_API(Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg)); +DO_API(void, il2cpp_format_exception, (const Il2CppException * ex, char* message, int message_size)); +DO_API(void, il2cpp_format_stack_trace, (const Il2CppException * ex, char* output, int 
output_size)); +DO_API(void, il2cpp_unhandled_exception, (Il2CppException*)); + +// field +DO_API(int, il2cpp_field_get_flags, (FieldInfo * field)); +DO_API(const char*, il2cpp_field_get_name, (FieldInfo * field)); +DO_API(Il2CppClass*, il2cpp_field_get_parent, (FieldInfo * field)); +DO_API(size_t, il2cpp_field_get_offset, (FieldInfo * field)); +DO_API(const Il2CppType*, il2cpp_field_get_type, (FieldInfo * field)); +DO_API(void, il2cpp_field_get_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo * field, Il2CppObject * obj)); +DO_API(bool, il2cpp_field_has_attribute, (FieldInfo * field, Il2CppClass * attr_class)); +DO_API(void, il2cpp_field_set_value, (Il2CppObject * obj, FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_get_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_static_set_value, (FieldInfo * field, void *value)); +DO_API(void, il2cpp_field_set_value_object, (Il2CppObject * instance, FieldInfo * field, Il2CppObject * value)); +DO_API(bool, il2cpp_field_is_literal, (FieldInfo * field)); +// gc +DO_API(void, il2cpp_gc_collect, (int maxGenerations)); +DO_API(int32_t, il2cpp_gc_collect_a_little, ()); +DO_API(void, il2cpp_gc_disable, ()); +DO_API(void, il2cpp_gc_enable, ()); +DO_API(bool, il2cpp_gc_is_disabled, ()); +DO_API(int64_t, il2cpp_gc_get_max_time_slice_ns, ()); +DO_API(void, il2cpp_gc_set_max_time_slice_ns, (int64_t maxTimeSlice)); +DO_API(bool, il2cpp_gc_is_incremental, ()); +DO_API(int64_t, il2cpp_gc_get_used_size, ()); +DO_API(int64_t, il2cpp_gc_get_heap_size, ()); +DO_API(void, il2cpp_gc_wbarrier_set_field, (Il2CppObject * obj, void **targetAddress, void *object)); +DO_API(bool, il2cpp_gc_has_strict_wbarriers, ()); +DO_API(void, il2cpp_gc_set_external_allocation_tracker, (void(*func)(void*, size_t, int))); +DO_API(void, il2cpp_gc_set_external_wbarrier_tracker, (void(*func)(void**))); +DO_API(void, il2cpp_gc_foreach_heap, 
(void(*func)(void* data, void* userData), void* userData)); +DO_API(void, il2cpp_stop_gc_world, ()); +DO_API(void, il2cpp_start_gc_world, ()); +// gchandle +DO_API(uint32_t, il2cpp_gchandle_new, (Il2CppObject * obj, bool pinned)); +DO_API(uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject * obj, bool track_resurrection)); +DO_API(Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle)); +DO_API(void, il2cpp_gchandle_free, (uint32_t gchandle)); +DO_API(void , il2cpp_gchandle_foreach_get_target, (void(*func)(void* data, void* userData), void* userData)); + +// vm runtime info +DO_API(uint32_t, il2cpp_object_header_size, ()); +DO_API(uint32_t, il2cpp_array_object_header_size, ()); +DO_API(uint32_t, il2cpp_offset_of_array_length_in_array_object_header, ()); +DO_API(uint32_t, il2cpp_offset_of_array_bounds_in_array_object_header, ()); +DO_API(uint32_t, il2cpp_allocation_granularity, ()); + +// liveness +DO_API(void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass * filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped)); +DO_API(void, il2cpp_unity_liveness_calculation_end, (void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject * root, void* state)); +DO_API(void, il2cpp_unity_liveness_calculation_from_statics, (void* state)); + +// method +DO_API(const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo * method)); +DO_API(Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_name, (const MethodInfo * method)); +DO_API(const MethodInfo*, il2cpp_method_get_from_reflection, (const Il2CppReflectionMethod * method)); +DO_API(Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo * method, Il2CppClass * refclass)); +DO_API(bool, il2cpp_method_is_generic, (const MethodInfo * method)); +DO_API(bool, 
il2cpp_method_is_inflated, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_is_instance, (const MethodInfo * method)); +DO_API(uint32_t, il2cpp_method_get_param_count, (const MethodInfo * method)); +DO_API(const Il2CppType*, il2cpp_method_get_param, (const MethodInfo * method, uint32_t index)); +DO_API(Il2CppClass*, il2cpp_method_get_class, (const MethodInfo * method)); +DO_API(bool, il2cpp_method_has_attribute, (const MethodInfo * method, Il2CppClass * attr_class)); +DO_API(uint32_t, il2cpp_method_get_flags, (const MethodInfo * method, uint32_t * iflags)); +DO_API(uint32_t, il2cpp_method_get_token, (const MethodInfo * method)); +DO_API(const char*, il2cpp_method_get_param_name, (const MethodInfo * method, uint32_t index)); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API(void, il2cpp_profiler_install, (Il2CppProfiler * prof, Il2CppProfileFunc shutdown_callback)); +DO_API(void, il2cpp_profiler_set_events, (Il2CppProfileFlags events)); +DO_API(void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave)); +DO_API(void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback)); +DO_API(void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback)); +DO_API(void, il2cpp_profiler_install_fileio, (Il2CppProfileFileIOFunc callback)); +DO_API(void, il2cpp_profiler_install_thread, (Il2CppProfileThreadFunc start, Il2CppProfileThreadFunc end)); + +#endif + +// property +DO_API(uint32_t, il2cpp_property_get_flags, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo * prop)); +DO_API(const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo * prop)); +DO_API(const char*, il2cpp_property_get_name, (PropertyInfo * prop)); +DO_API(Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo * prop)); + +// object +DO_API(Il2CppClass*, il2cpp_object_get_class, (Il2CppObject * obj)); +DO_API(uint32_t, 
il2cpp_object_get_size, (Il2CppObject * obj)); +DO_API(const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject * obj, const MethodInfo * method)); +DO_API(Il2CppObject*, il2cpp_object_new, (const Il2CppClass * klass)); +DO_API(void*, il2cpp_object_unbox, (Il2CppObject * obj)); + +DO_API(Il2CppObject*, il2cpp_value_box, (Il2CppClass * klass, void* data)); + +// monitor +DO_API(void, il2cpp_monitor_enter, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_enter, (Il2CppObject * obj, uint32_t timeout)); +DO_API(void, il2cpp_monitor_exit, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_pulse_all, (Il2CppObject * obj)); +DO_API(void, il2cpp_monitor_wait, (Il2CppObject * obj)); +DO_API(bool, il2cpp_monitor_try_wait, (Il2CppObject * obj, uint32_t timeout)); + +// runtime +DO_API(Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo * method, void *obj, void **params, Il2CppException **exc)); +DO_API(Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo * method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc)); +DO_API(void, il2cpp_runtime_class_init, (Il2CppClass * klass)); +DO_API(void, il2cpp_runtime_object_init, (Il2CppObject * obj)); + +DO_API(void, il2cpp_runtime_object_init_exception, (Il2CppObject * obj, Il2CppException** exc)); + +DO_API(void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value)); + +// string +DO_API(int32_t, il2cpp_string_length, (Il2CppString * str)); +DO_API(Il2CppChar*, il2cpp_string_chars, (Il2CppString * str)); +DO_API(Il2CppString*, il2cpp_string_new, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length)); +DO_API(Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar * text, int32_t len)); +DO_API(Il2CppString*, il2cpp_string_new_wrapper, (const char* str)); +DO_API(Il2CppString*, il2cpp_string_intern, (Il2CppString * str)); 
+DO_API(Il2CppString*, il2cpp_string_is_interned, (Il2CppString * str)); + +// thread +DO_API(Il2CppThread*, il2cpp_thread_current, ()); +DO_API(Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain * domain)); +DO_API(void, il2cpp_thread_detach, (Il2CppThread * thread)); + +DO_API(Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t * size)); +DO_API(bool, il2cpp_is_vm_thread, (Il2CppThread * thread)); + +// stacktrace +DO_API(void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data)); +DO_API(void, il2cpp_thread_walk_frame_stack, (Il2CppThread * thread, Il2CppFrameWalkFunc func, void* user_data)); +DO_API(bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_top_frame, (Il2CppThread * thread, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(bool, il2cpp_thread_get_frame_at, (Il2CppThread * thread, int32_t offset, Il2CppStackFrameInfo * frame)); +DO_API(int32_t, il2cpp_current_thread_get_stack_depth, ()); +DO_API(int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread * thread)); +DO_API(void, il2cpp_override_stack_backtrace, (Il2CppBacktraceFunc stackBacktraceFunc)); + +// type +DO_API(Il2CppObject*, il2cpp_type_get_object, (const Il2CppType * type)); +DO_API(int, il2cpp_type_get_type, (const Il2CppType * type)); +DO_API(Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType * type)); +DO_API(char*, il2cpp_type_get_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_byref, (const Il2CppType * type)); +DO_API(uint32_t, il2cpp_type_get_attrs, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_equals, (const Il2CppType * type, const Il2CppType * otherType)); +DO_API(char*, il2cpp_type_get_assembly_qualified_name, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_static, (const Il2CppType * type)); +DO_API(bool, il2cpp_type_is_pointer_type, (const 
Il2CppType * type)); + +// image +DO_API(const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_name, (const Il2CppImage * image)); +DO_API(const char*, il2cpp_image_get_filename, (const Il2CppImage * image)); +DO_API(const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage * image)); + +DO_API(size_t, il2cpp_image_get_class_count, (const Il2CppImage * image)); +DO_API(const Il2CppClass*, il2cpp_image_get_class, (const Il2CppImage * image, size_t index)); + +// Memory information +DO_API(Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, ()); +DO_API(void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot * snapshot)); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +// Debugger +DO_API(void, il2cpp_debugger_set_agent_options, (const char* options)); +DO_API(bool, il2cpp_is_debugger_attached, ()); +DO_API(void, il2cpp_register_debugger_agent_transport, (Il2CppDebuggerTransport * debuggerTransport)); + +// Debug metadata +DO_API(bool, il2cpp_debug_get_method_info, (const MethodInfo*, Il2CppMethodDebugInfo * methodDebugInfo)); + +// TLS module +DO_API(void, il2cpp_unity_install_unitytls_interface, (const void* unitytlsInterfaceStruct)); + +// custom attributes +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_class, (Il2CppClass * klass)); +DO_API(Il2CppCustomAttrInfo*, il2cpp_custom_attrs_from_method, (const MethodInfo * method)); + +DO_API(Il2CppObject*, il2cpp_custom_attrs_get_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(bool, il2cpp_custom_attrs_has_attr, (Il2CppCustomAttrInfo * ainfo, Il2CppClass * attr_klass)); +DO_API(Il2CppArray*, il2cpp_custom_attrs_construct, (Il2CppCustomAttrInfo * cinfo)); + +DO_API(void, il2cpp_custom_attrs_free, (Il2CppCustomAttrInfo * ainfo)); + +// Il2CppClass user data for 
GetComponent optimization +DO_API(void, il2cpp_class_set_userdata, (Il2CppClass * klass, void* userdata)); +DO_API(int, il2cpp_class_get_userdata_offset, ()); diff --git a/module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-class.h new file mode 100644 index 00000000..06e81ad6 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/2019.3.7f1/il2cpp-class.h @@ -0,0 +1,1412 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef struct Il2CppCustomAttrInfo Il2CppCustomAttrInfo; +typedef enum +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 
13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19, + IL2CPP_PROFILE_FILEIO = 1 << 20 +} Il2CppProfileFlags; +typedef enum +{ + IL2CPP_PROFILE_FILEIO_WRITE = 0, + IL2CPP_PROFILE_FILEIO_READ +} Il2CppProfileFileIOKind; +typedef enum +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef void(*Il2CppMethodPointer)(); +typedef struct Il2CppMethodDebugInfo +{ + Il2CppMethodPointer methodPointer; + int32_t code_size; + const char *file; +} Il2CppMethodDebugInfo; +typedef struct +{ + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef struct +{ + const char *name; + void(*connect)(const char *address); + int(*wait_for_attach)(void); + void(*close1)(void); 
+ void(*close2)(void); + int(*send)(void *buf, int len); + int(*recv)(void *buf, int len); +} Il2CppDebuggerTransport; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef void (*Il2CppProfileFileIOFunc) (Il2CppProfiler* prof, Il2CppProfileFileIOKind kind, int count); +typedef void (*Il2CppProfileThreadFunc) (Il2CppProfiler *prof, unsigned long tid); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef size_t(*Il2CppBacktraceFunc) (Il2CppMethodPointer* buffer, size_t maxSize); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef uintptr_t il2cpp_array_size_t; +typedef void ( *SynchronizationContextCallback)(intptr_t arg); +typedef uint32_t Il2CppMethodSlot; +static const uint32_t kInvalidIl2CppMethodSlot = 65535; +static const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 
0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST = 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef enum +{ + IL2CPP_TOKEN_MODULE = 0x00000000, + IL2CPP_TOKEN_TYPE_REF = 0x01000000, + IL2CPP_TOKEN_TYPE_DEF = 0x02000000, + IL2CPP_TOKEN_FIELD_DEF = 0x04000000, + IL2CPP_TOKEN_METHOD_DEF = 0x06000000, + IL2CPP_TOKEN_PARAM_DEF = 0x08000000, + IL2CPP_TOKEN_INTERFACE_IMPL = 0x09000000, + IL2CPP_TOKEN_MEMBER_REF = 0x0a000000, + IL2CPP_TOKEN_CUSTOM_ATTRIBUTE = 0x0c000000, + IL2CPP_TOKEN_PERMISSION = 0x0e000000, + IL2CPP_TOKEN_SIGNATURE = 0x11000000, + IL2CPP_TOKEN_EVENT = 0x14000000, + IL2CPP_TOKEN_PROPERTY = 0x17000000, + IL2CPP_TOKEN_MODULE_REF = 0x1a000000, + IL2CPP_TOKEN_TYPE_SPEC = 0x1b000000, + IL2CPP_TOKEN_ASSEMBLY = 0x20000000, + IL2CPP_TOKEN_ASSEMBLY_REF = 0x23000000, + IL2CPP_TOKEN_FILE = 0x26000000, + IL2CPP_TOKEN_EXPORTED_TYPE = 0x27000000, + IL2CPP_TOKEN_MANIFEST_RESOURCE = 0x28000000, + IL2CPP_TOKEN_GENERIC_PARAM = 0x2a000000, + IL2CPP_TOKEN_METHOD_SPEC = 0x2b000000, +} Il2CppTokenType; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef 
int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +static const TypeIndex kTypeIndexInvalid = -1; +static const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +static const DefaultValueDataIndex kDefaultValueIndexNull = -1; +static const CustomAttributeIndex kCustomAttributeIndexInvalid = -1; +static const EventIndex kEventIndexInvalid = -1; +static const FieldIndex kFieldIndexInvalid = -1; +static const MethodIndex kMethodIndexInvalid = -1; +static const PropertyIndex kPropertyIndexInvalid = -1; +static const GenericContainerIndex kGenericContainerIndexInvalid = -1; +static const GenericParameterIndex kGenericParameterIndexInvalid = -1; +static const RGCTXIndex kRGCTXIndexInvalid = -1; +static const StringLiteralIndex kStringLiteralIndexInvalid = -1; +static const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + 
IL2CPP_RGCTX_DATA_METHOD, + IL2CPP_RGCTX_DATA_ARRAY, +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} 
Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + GenericContainerIndex genericContainerIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +static const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyNameDefinition +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyNameDefinition; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + MethodIndex entryPointIndex; + uint32_t token; + CustomAttributeIndex 
customAttributeStart; + uint32_t customAttributeCount; +} Il2CppImageDefinition; +typedef struct Il2CppAssemblyDefinition +{ + ImageIndex imageIndex; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyNameDefinition aname; +} Il2CppAssemblyDefinition; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + uint32_t token; + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t 
nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; + int32_t exportedTypeDefinitionsOffset; + int32_t exportedTypeDefinitionsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} 
Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union + { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum Il2CppCallConvention +{ + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE, + CHARSET_NOT_SPECIFIED +} Il2CppCharSet; +typedef struct Il2CppHString__ +{ + int unused; +} Il2CppHString__; +typedef Il2CppHString__* Il2CppHString; +typedef struct Il2CppHStringHeader +{ + union + { + void* Reserved1; + char Reserved2[24]; + } Reserved; +} Il2CppHStringHeader; +typedef struct Il2CppGuid +{ + uint32_t data1; + uint16_t data2; + uint16_t data3; + uint8_t data4[8]; +} Il2CppGuid; +typedef struct Il2CppSafeArrayBound +{ + uint32_t element_count; + int32_t lower_bound; +} Il2CppSafeArrayBound; +typedef struct Il2CppSafeArray +{ + uint16_t dimension_count; + uint16_t features; + uint32_t element_size; + uint32_t lock_count; + void* data; + Il2CppSafeArrayBound bounds[1]; +} Il2CppSafeArray; +typedef struct Il2CppWin32Decimal +{ + uint16_t reserved; + union + { + struct + { + uint8_t scale; + uint8_t sign; + } s; + 
uint16_t signscale; + } u; + uint32_t hi32; + union + { + struct + { + uint32_t lo32; + uint32_t mid32; + } s2; + uint64_t lo64; + } u2; +} Il2CppWin32Decimal; +typedef int16_t IL2CPP_VARIANT_BOOL; +typedef enum Il2CppVarType +{ + IL2CPP_VT_EMPTY = 0, + IL2CPP_VT_NULL = 1, + IL2CPP_VT_I2 = 2, + IL2CPP_VT_I4 = 3, + IL2CPP_VT_R4 = 4, + IL2CPP_VT_R8 = 5, + IL2CPP_VT_CY = 6, + IL2CPP_VT_DATE = 7, + IL2CPP_VT_BSTR = 8, + IL2CPP_VT_DISPATCH = 9, + IL2CPP_VT_ERROR = 10, + IL2CPP_VT_BOOL = 11, + IL2CPP_VT_VARIANT = 12, + IL2CPP_VT_UNKNOWN = 13, + IL2CPP_VT_DECIMAL = 14, + IL2CPP_VT_I1 = 16, + IL2CPP_VT_UI1 = 17, + IL2CPP_VT_UI2 = 18, + IL2CPP_VT_UI4 = 19, + IL2CPP_VT_I8 = 20, + IL2CPP_VT_UI8 = 21, + IL2CPP_VT_INT = 22, + IL2CPP_VT_UINT = 23, + IL2CPP_VT_VOID = 24, + IL2CPP_VT_HRESULT = 25, + IL2CPP_VT_PTR = 26, + IL2CPP_VT_SAFEARRAY = 27, + IL2CPP_VT_CARRAY = 28, + IL2CPP_VT_USERDEFINED = 29, + IL2CPP_VT_LPSTR = 30, + IL2CPP_VT_LPWSTR = 31, + IL2CPP_VT_RECORD = 36, + IL2CPP_VT_INT_PTR = 37, + IL2CPP_VT_UINT_PTR = 38, + IL2CPP_VT_FILETIME = 64, + IL2CPP_VT_BLOB = 65, + IL2CPP_VT_STREAM = 66, + IL2CPP_VT_STORAGE = 67, + IL2CPP_VT_STREAMED_OBJECT = 68, + IL2CPP_VT_STORED_OBJECT = 69, + IL2CPP_VT_BLOB_OBJECT = 70, + IL2CPP_VT_CF = 71, + IL2CPP_VT_CLSID = 72, + IL2CPP_VT_VERSIONED_STREAM = 73, + IL2CPP_VT_BSTR_BLOB = 0xfff, + IL2CPP_VT_VECTOR = 0x1000, + IL2CPP_VT_ARRAY = 0x2000, + IL2CPP_VT_BYREF = 0x4000, + IL2CPP_VT_RESERVED = 0x8000, + IL2CPP_VT_ILLEGAL = 0xffff, + IL2CPP_VT_ILLEGALMASKED = 0xfff, + IL2CPP_VT_TYPEMASK = 0xfff, +} Il2CppVarType; +typedef struct Il2CppVariant Il2CppVariant; +typedef struct Il2CppIUnknown Il2CppIUnknown; +typedef struct Il2CppVariant +{ + union + { + struct __tagVARIANT + { + uint16_t type; + uint16_t reserved1; + uint16_t reserved2; + uint16_t reserved3; + union + { + int64_t llVal; + int32_t lVal; + uint8_t bVal; + int16_t iVal; + float fltVal; + double dblVal; + IL2CPP_VARIANT_BOOL boolVal; + int32_t scode; + int64_t cyVal; + double date; + 
Il2CppChar* bstrVal; + Il2CppIUnknown* punkVal; + void* pdispVal; + Il2CppSafeArray* parray; + uint8_t* pbVal; + int16_t* piVal; + int32_t* plVal; + int64_t* pllVal; + float* pfltVal; + double* pdblVal; + IL2CPP_VARIANT_BOOL* pboolVal; + int32_t* pscode; + int64_t* pcyVal; + double* pdate; + Il2CppChar* pbstrVal; + Il2CppIUnknown** ppunkVal; + void** ppdispVal; + Il2CppSafeArray** pparray; + Il2CppVariant* pvarVal; + void* byref; + char cVal; + uint16_t uiVal; + uint32_t ulVal; + uint64_t ullVal; + int intVal; + unsigned int uintVal; + Il2CppWin32Decimal* pdecVal; + char* pcVal; + uint16_t* puiVal; + uint32_t* pulVal; + uint64_t* pullVal; + int* pintVal; + unsigned int* puintVal; + struct __tagBRECORD + { + void* pvRecord; + void* pRecInfo; + } n4; + } n3; + } n2; + Il2CppWin32Decimal decVal; + } n1; +} Il2CppVariant; +typedef struct Il2CppFileTime +{ + uint32_t low; + uint32_t high; +} Il2CppFileTime; +typedef struct Il2CppStatStg +{ + Il2CppChar* name; + uint32_t type; + uint64_t size; + Il2CppFileTime mtime; + Il2CppFileTime ctime; + Il2CppFileTime atime; + uint32_t mode; + uint32_t locks; + Il2CppGuid clsid; + uint32_t state; + uint32_t reserved; +} Il2CppStatStg; +typedef enum Il2CppWindowsRuntimeTypeKind +{ + kTypeKindPrimitive = 0, + kTypeKindMetadata, + kTypeKindCustom +} Il2CppWindowsRuntimeTypeKind; +typedef struct Il2CppWindowsRuntimeTypeName +{ + Il2CppHString typeName; + enum Il2CppWindowsRuntimeTypeKind typeKind; +} Il2CppWindowsRuntimeTypeName; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIUnknown* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc 
pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} Il2CppInteropData; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct Il2CppCodeGenModule Il2CppCodeGenModule; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +typedef struct Il2CppDefaults +{ + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + 
Il2CppClass *thread_class; + Il2CppClass *internal_thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_ireadonlylist_class; + Il2CppClass *generic_ireadonlycollection_class; + Il2CppClass *runtimetype_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *attribute_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *mono_assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *mono_parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass *threadpool_wait_callback_class; + MethodInfo *threadpool_perform_wait_callback_method; + Il2CppClass *mono_method_message_class; + Il2CppClass* ireference_class; + Il2CppClass* ireferencearray_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; + 
Il2CppClass* windows_foundation_uri_class; + Il2CppClass* windows_foundation_iuri_runtime_class_class; + Il2CppClass* system_uri_class; + Il2CppClass* system_guid_class; + Il2CppClass* sbyte_shared_enum; + Il2CppClass* int16_shared_enum; + Il2CppClass* int32_shared_enum; + Il2CppClass* int64_shared_enum; + Il2CppClass* byte_shared_enum; + Il2CppClass* uint16_shared_enum; + Il2CppClass* uint32_shared_enum; + Il2CppClass* uint64_shared_enum; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(Il2CppMethodPointer, const MethodInfo*, void*, void**); +typedef enum MethodVariableKind +{ + kMethodVariableKind_This, + kMethodVariableKind_Parameter, + kMethodVariableKind_LocalVariable +} MethodVariableKind; +typedef enum SequencePointKind +{ + kSequencePointKind_Normal, + kSequencePointKind_StepOut +} SequencePointKind; +typedef struct Il2CppMethodExecutionContextInfo +{ + TypeIndex 
typeIndex; + int32_t nameIndex; + int32_t scopeIndex; +} Il2CppMethodExecutionContextInfo; +typedef struct Il2CppMethodExecutionContextInfoIndex +{ + int32_t startIndex; + int32_t count; +} Il2CppMethodExecutionContextInfoIndex; +typedef struct Il2CppMethodScope +{ + int32_t startOffset; + int32_t endOffset; +} Il2CppMethodScope; +typedef struct Il2CppMethodHeaderInfo +{ + int32_t code_size; + int32_t startScope; + int32_t numScopes; +} Il2CppMethodHeaderInfo; +typedef struct Il2CppSequencePointSourceFile +{ + const char *file; + uint8_t hash[16]; +} Il2CppSequencePointSourceFile; +typedef struct Il2CppTypeSourceFilePair +{ + TypeDefinitionIndex klassIndex; + int32_t sourceFileIndex; +} Il2CppTypeSourceFilePair; +typedef struct Il2CppSequencePoint +{ + MethodIndex methodDefinitionIndex; + int32_t sourceFileIndex; + int32_t lineStart, lineEnd; + int32_t columnStart, columnEnd; + int32_t ilOffset; + SequencePointKind kind; + int32_t isActive; + int32_t id; +} Il2CppSequencePoint; +typedef struct Il2CppCatchPoint +{ + MethodIndex methodDefinitionIndex; + TypeIndex catchTypeIndex; + int32_t ilOffset; + int8_t tryId; + int8_t parentTryId; +} Il2CppCatchPoint; +typedef struct Il2CppDebuggerMetadataRegistration +{ + Il2CppMethodExecutionContextInfo* methodExecutionContextInfos; + Il2CppMethodExecutionContextInfoIndex* methodExecutionContextInfoIndexes; + Il2CppMethodScope* methodScopes; + Il2CppMethodHeaderInfo* methodHeaderInfos; + Il2CppSequencePointSourceFile* sequencePointSourceFiles; + int32_t numSequencePoints; + Il2CppSequencePoint* sequencePoints; + int32_t numCatchPoints; + Il2CppCatchPoint* catchPoints; + int32_t numTypeSourceFileEntries; + Il2CppTypeSourceFilePair* typeSourceFiles; + const char** methodExecutionContextInfoStrings; +} Il2CppDebuggerMetadataRegistration; +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + 
Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *klass; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; + uint8_t wrapper_type : 1; + uint8_t is_marshaled_from_native : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + Il2CppType byval_arg; + Il2CppType this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + Il2CppClass* klass; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + void *unity_user_data; + uint32_t initializationExceptionGCHandle; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) size_t cctor_thread; + GenericContainerIndex genericContainerIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + 
uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t naturalAligment; + uint8_t packingSize; + uint8_t initialized_and_no_error : 1; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + uint8_t has_initialization_error : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; + volatile int threadpool_jobs; + void* agent_info; +} Il2CppDomain; +typedef struct Il2CppAssemblyName +{ + const char* name; + const char* culture; + const char* hash_value; + const char* public_key; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t public_key_token[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImage +{ + const char* name; + const char *nameNoExt; + Il2CppAssembly* assembly; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + TypeDefinitionIndex exportedTypeStart; + uint32_t exportedTypeCount; + CustomAttributeIndex customAttributeStart; + uint32_t customAttributeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable * nameToClassHashTable; + const 
Il2CppCodeGenModule* codeGenModule; + uint32_t token; + uint8_t dynamic; +} Il2CppImage; +typedef struct Il2CppAssembly +{ + Il2CppImage* image; + uint32_t token; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppCodeGenOptions +{ + uint8_t enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppTokenIndexPair +{ + uint32_t token; + int32_t index; +} Il2CppTokenIndexPair; +typedef struct Il2CppTokenRangePair +{ + uint32_t token; + Il2CppRange range; +} Il2CppTokenRangePair; +typedef struct Il2CppTokenIndexMethodTuple +{ + uint32_t token; + int32_t index; + void** method; + uint32_t genericMethodIndex; +} Il2CppTokenIndexMethodTuple; +typedef struct Il2CppWindowsRuntimeFactoryTableEntry +{ + const Il2CppType* type; + Il2CppMethodPointer createFactoryFunction; +} Il2CppWindowsRuntimeFactoryTableEntry; +typedef struct Il2CppCodeGenModule +{ + const char* moduleName; + const uint32_t methodPointerCount; + const Il2CppMethodPointer* methodPointers; + const int32_t* invokerIndices; + const uint32_t reversePInvokeWrapperCount; + const Il2CppTokenIndexMethodTuple* reversePInvokeWrapperIndices; + const uint32_t rgctxRangesCount; + const Il2CppTokenRangePair* rgctxRanges; + const uint32_t rgctxsCount; + const Il2CppRGCTXDefinition* rgctxs; + const Il2CppDebuggerMetadataRegistration *debuggerMetadata; +} Il2CppCodeGenModule; +typedef struct Il2CppCodeRegistration +{ + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + 
Il2CppInteropData* interopData; + uint32_t windowsRuntimeFactoryCount; + Il2CppWindowsRuntimeFactoryTableEntry* windowsRuntimeFactoryTable; + uint32_t codeGenModulesCount; + const Il2CppCodeGenModule** codeGenModules; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + 
uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-api-functions.h new file mode 100644 index 00000000..46f1d098 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-api-functions.h @@ -0,0 +1,250 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, methodPointerType method) ); +DO_API( methodPointerType, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly 
*assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, 
il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (Il2CppImage* image, const char *name_space, const char *name, const char *msg) ); +DO_API( 
Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, register_object_callback callback, void* userdata, WorldChangedCallback onWorldStarted, WorldChangedCallback onWorldStopped) ); +DO_API( void, 
il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +// property +DO_API( 
uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppObject **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppObject **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + +DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppObject** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( 
Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( uint16_t*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const uint16_t *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, 
il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const 
Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-class.h new file mode 100644 index 00000000..cd5922bd --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.2f1/il2cpp-class.h @@ -0,0 +1,877 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct 
Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +enum Il2CppProfileFlags { + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +}; +enum Il2CppGCEvent { + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +}; +enum Il2CppStat { + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +}; +enum Il2CppRuntimeUnhandledExceptionPolicy { + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +}; +struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +}; +typedef struct { + void* (*malloc_func)(size_t size); + void (*free_func)(void *ptr); + 
void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); +} Il2CppMemoryCallbacks; +typedef void (*register_object_callback)(void** arr, int size, void* userdata); +typedef void (*WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const char* (*Il2CppSetFindPlugInCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*methodPointerType)(); +typedef int32_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + 
IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline 
uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex delegateWrapperFromManagedToNativeIndex; + int32_t marshalingFunctionsIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} 
Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex delegateWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex 
dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t 
fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + 
int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct PInvokeArguments +{ + const char* moduleName; + const char* entryPoint; + Il2CppCallConvention callingConvention; + Il2CppCharSet charSet; + int parameterSize; + bool isNoMangle; +} PInvokeArguments; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + 
Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass 
*pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*, CustomAttributeTypeCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + methodPointerType method; + InvokerMethod 
invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + const MethodInfo** vtable; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + 
uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + methodPointerType marshal_to_native_func; + methodPointerType marshal_from_native_func; + methodPointerType marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const methodPointerType* methodPointers; + uint32_t delegateWrappersFromNativeToManagedCount; + const methodPointerType** delegateWrappersFromNativeToManaged; + uint32_t delegateWrappersFromManagedToNativeCount; + const methodPointerType* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t genericMethodPointersCount; + const methodPointerType* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const 
CustomAttributesCacheGenerator* customAttributeGenerators; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t* fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes* typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-api-functions.h new file mode 100644 index 00000000..46f1d098 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-api-functions.h @@ -0,0 +1,250 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, methodPointerType method) ); +DO_API( methodPointerType, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly 
*assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, 
il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (Il2CppImage* image, const char *name_space, const char *name, const char *msg) ); +DO_API( 
Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, register_object_callback callback, void* userdata, WorldChangedCallback onWorldStarted, WorldChangedCallback onWorldStopped) ); +DO_API( void, 
il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +// property +DO_API( 
uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppObject **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppObject **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + +DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppObject** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( 
Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( uint16_t*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const uint16_t *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, 
il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const 
Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-class.h new file mode 100644 index 00000000..bd7f0f62 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.3f1/il2cpp-class.h @@ -0,0 +1,795 @@ +typedef void (*methodPointerType)(); +typedef int32_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR 
= 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex 
kStringLiteralIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex delegateWrapperFromManagedToNativeIndex; + int32_t marshalingFunctionsIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + 
InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex delegateWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + 
CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +#pragma pack(push, p1,4) +typedef struct 
Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const 
Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct PInvokeArguments +{ + const char* moduleName; + const char* entryPoint; + Il2CppCallConvention callingConvention; + Il2CppCharSet charSet; + int parameterSize; + bool isNoMangle; +} PInvokeArguments; +typedef struct Il2CppClass 
Il2CppClass; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass 
*typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*, CustomAttributeTypeCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; 
+ Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + methodPointerType method; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + const MethodInfo** vtable; + Il2CppRuntimeInterfaceOffsetPair* 
interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import : 1; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + methodPointerType marshal_to_native_func; + methodPointerType marshal_from_native_func; + methodPointerType marshal_cleanup_func; 
+} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const methodPointerType* methodPointers; + uint32_t delegateWrappersFromNativeToManagedCount; + const methodPointerType** delegateWrappersFromNativeToManaged; + uint32_t delegateWrappersFromManagedToNativeCount; + const methodPointerType* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t genericMethodPointersCount; + const methodPointerType* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t* fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes* typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t 
il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-api-functions.h new file mode 100644 index 00000000..c2d46166 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-api-functions.h @@ -0,0 +1,253 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, 
il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const 
char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( 
Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char *name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, 
il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const 
char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, 
il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + +DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( uint16_t*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const uint16_t *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, 
(Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const 
char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-class.h new file mode 100644 index 00000000..00afeba1 --- /dev/null +++ 
b/module/src/main/cpp/il2cppapi/5.3.5f1/il2cpp-class.h @@ -0,0 +1,924 @@ +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + 
IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); 
+typedef const char* (*Il2CppSetFindPlugInCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t 
InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + 
Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex delegateWrapperFromManagedToNativeIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} 
Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex delegateWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; 
+ uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t 
genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + 
TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct PInvokeArguments +{ + const char* moduleName; + const char* entryPoint; + Il2CppCallConvention callingConvention; + Il2CppCharSet charSet; + int parameterSize; + bool isNoMangle; +} PInvokeArguments; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + 
Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass 
*wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer method; + InvokerMethod invoker_method; + 
const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + const MethodInfo** vtable; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t 
vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import : 1; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t delegateWrappersFromNativeToManagedCount; + const Il2CppMethodPointer** delegateWrappersFromNativeToManaged; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* 
ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t* fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes* typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. 
+ On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-api-functions.h new file mode 100644 index 00000000..f326efb2 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-api-functions.h @@ -0,0 +1,257 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, 
(const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, 
il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); 
+DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char *name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) 
); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, 
il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, 
il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + +DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, 
(Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); 
+DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-class.h new file mode 100644 index 00000000..f538a45e --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.6f1/il2cpp-class.h @@ -0,0 +1,985 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; 
+typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + 
IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void 
(*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex 
kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex 
namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex reversePInvokeWrapperIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex 
declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + 
uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t 
interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ 
+ union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass 
*asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct 
MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t 
slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + VirtualInvokeData* vtable; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + 
uint8_t is_import_or_windows_runtime : 1; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* 
const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t* fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes* typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t 
remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-api-functions.h new file mode 100644 index 00000000..f326efb2 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-api-functions.h @@ -0,0 +1,257 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const 
Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); 
+DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char 
*name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, 
il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, 
il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + 
+DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, 
Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, 
il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-class.h new file mode 100644 index 00000000..14fcdde2 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.3.7f1/il2cpp-class.h @@ -0,0 +1,985 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly 
Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + 
IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + 
IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const 
DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + 
TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex reversePInvokeWrapperIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex 
customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; 
+typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t 
rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + 
Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass 
*typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int 
count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t 
is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + VirtualInvokeData* vtable; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; +} Il2CppClass; +typedef struct 
Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const 
Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t 
loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-api-functions.h new file mode 100644 index 00000000..f326efb2 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-api-functions.h @@ -0,0 +1,257 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const 
Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); 
+DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char 
*name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, 
il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, 
il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + 
+DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, 
Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, 
il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-class.h new file mode 100644 index 00000000..f538a45e --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.4.0f3/il2cpp-class.h @@ -0,0 +1,985 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly 
Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + 
IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + 
IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const 
DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + 
TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex reversePInvokeWrapperIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex 
customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; 
+typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t 
rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + 
Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass 
*typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int 
count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t 
is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + VirtualInvokeData* vtable; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; +} Il2CppClass; +typedef struct 
Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const 
Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t* fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes* typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t 
loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-api-functions.h new file mode 100644 index 00000000..f326efb2 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-api-functions.h @@ -0,0 +1,257 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const 
Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); 
+DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char 
*name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, 
il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, 
il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + 
+DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, 
Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, 
il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-class.h new file mode 100644 index 00000000..14fcdde2 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.4.1f1/il2cpp-class.h @@ -0,0 +1,985 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly 
Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + 
IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + 
IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const 
DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + 
TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex reversePInvokeWrapperIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex 
customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; 
+typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t 
rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + 
Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass 
*typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int 
count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t 
is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + VirtualInvokeData* vtable; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; +} Il2CppClass; +typedef struct 
Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const 
Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t 
loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-api-functions.h new file mode 100644 index 00000000..f326efb2 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-api-functions.h @@ -0,0 +1,257 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* argv[], const char* basedir) ); +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const 
Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); 
+DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char 
*name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, 
il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, 
il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + 
+DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// delegate +DO_API( Il2CppAsyncResult*, il2cpp_delegate_begin_invoke, (Il2CppDelegate* delegate, void** params, Il2CppDelegate* asyncCallback, Il2CppObject* state) ); +DO_API( Il2CppObject*, il2cpp_delegate_end_invoke, (Il2CppAsyncResult* asyncResult, void **out_args) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, 
Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, 
il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-class.h new file mode 100644 index 00000000..ea98b04d --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.4.4f1/il2cpp-class.h @@ -0,0 +1,986 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly 
Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + 
IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + 
IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const 
DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + 
TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex reversePInvokeWrapperIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex 
customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; 
+typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t 
rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + 
Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass 
*typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int 
count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t 
is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + VirtualInvokeData* vtable; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; 
+} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppObject* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t 
genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; 
+ uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. 
*/ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-api-functions.h new file mode 100644 index 00000000..abef95da --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-api-functions.h @@ -0,0 +1,258 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_init_utf16, (const Il2CppChar* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir) ); +DO_API( void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar* const argv[], const char* basedir) ); +DO_API( void, il2cpp_set_config_utf16, (const Il2CppChar* executablePath) ); +DO_API( void, il2cpp_set_config, (const char* executablePath)); + +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass 
*array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, 
void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, 
(Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char *name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); 
+DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if 
IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, 
void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + +DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); 
+DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, 
il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-class.h new file mode 100644 index 00000000..efff4041 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.5.0f3/il2cpp-class.h @@ -0,0 +1,1002 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo 
FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + 
IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void 
(*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex 
kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex 
namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex reversePInvokeWrapperIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex 
declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + 
uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t 
vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + 
TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + 
Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + 
Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; 
+ Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t 
interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* 
ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t 
gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. 
+ On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-api-functions.h new file mode 100644 index 00000000..abef95da --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-api-functions.h @@ -0,0 +1,258 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_init_utf16, (const Il2CppChar* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir) ); +DO_API( void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar* const argv[], const char* basedir) ); +DO_API( void, il2cpp_set_config_utf16, (const Il2CppChar* executablePath) ); +DO_API( void, il2cpp_set_config, (const char* executablePath)); + +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_specific, (Il2CppClass 
*arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, il2cpp_class_get_field_from_name, 
(Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, il2cpp_stats_get_value, (Il2CppStat stat) ); + +// 
domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char *name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, 
il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); +DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const 
char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); 
+ +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + +DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); 
+DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, 
il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-class.h new file mode 100644 index 00000000..5856745f --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.5.1f1/il2cpp-class.h @@ -0,0 +1,1003 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef 
struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; 
+typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void (*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef struct 
Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t 
AssemblyIndex; +typedef int32_t GuidIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const GuidIndex kGuidIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct 
Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + MethodIndex reversePInvokeWrapperIndex; + int32_t marshalingFunctionsIndex; + int32_t ccwFunctionIndex; + GuidIndex guidIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct 
Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; 
+ AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + 
int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} 
Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + 
Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass 
*argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer 
methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; 
+ uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppMarshalingFunctions +{ + Il2CppMethodPointer marshal_to_native_func; + Il2CppMethodPointer marshal_from_native_func; + Il2CppMethodPointer marshal_cleanup_func; +} Il2CppMarshalingFunctions; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t delegateWrappersFromManagedToNativeCount; + const Il2CppMethodPointer* delegateWrappersFromManagedToNative; + uint32_t marshalingFunctionsCount; + const Il2CppMarshalingFunctions* 
marshalingFunctions; + uint32_t ccwMarshalingFunctionsCount; + const Il2CppMethodPointer* ccwMarshalingFunctions; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + GuidIndex guidCount; + const Il2CppGuid** guids; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + 
uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. 
+ On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-api-functions.h b/module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-api-functions.h new file mode 100644 index 00000000..6ac05688 --- /dev/null +++ b/module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-api-functions.h @@ -0,0 +1,262 @@ + +DO_API( void, il2cpp_init, (const char* domain_name) ); +DO_API( void, il2cpp_init_utf16, (const Il2CppChar* domain_name) ); +DO_API( void, il2cpp_shutdown, () ); +DO_API( void, il2cpp_set_config_dir, (const char *config_path) ); +DO_API( void, il2cpp_set_data_dir, (const char *data_path) ); +DO_API( void, il2cpp_set_commandline_arguments, (int argc, const char* const argv[], const char* basedir) ); +DO_API( void, il2cpp_set_commandline_arguments_utf16, (int argc, const Il2CppChar* const argv[], const char* basedir) ); +DO_API( void, il2cpp_set_config_utf16, (const Il2CppChar* executablePath) ); +DO_API( void, il2cpp_set_config, (const char* executablePath)); + +DO_API( void, il2cpp_set_memory_callbacks, (Il2CppMemoryCallbacks* callbacks) ); +DO_API( const Il2CppImage*, il2cpp_get_corlib, () ); +DO_API( void, il2cpp_add_internal_call, (const char* name, Il2CppMethodPointer method) ); +DO_API( Il2CppMethodPointer, il2cpp_resolve_icall, (const char* name) ); + +DO_API( void*, il2cpp_alloc, (size_t size) ); +DO_API( void, il2cpp_free, (void* ptr) ); + +// array +DO_API( Il2CppClass*, il2cpp_array_class_get, (Il2CppClass *element_class, uint32_t rank) ); +DO_API( uint32_t, il2cpp_array_length, (Il2CppArray* array) ); +DO_API( uint32_t, il2cpp_array_get_byte_length, (Il2CppArray *array) ); +DO_API( Il2CppArray*, il2cpp_array_new, (Il2CppClass *elementTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, 
il2cpp_array_new_specific, (Il2CppClass *arrayTypeInfo, il2cpp_array_size_t length) ); +DO_API( Il2CppArray*, il2cpp_array_new_full, (Il2CppClass *array_class, il2cpp_array_size_t *lengths, il2cpp_array_size_t *lower_bounds) ); +DO_API( Il2CppClass*, il2cpp_bounded_array_class_get, (Il2CppClass *element_class, uint32_t rank, bool bounded) ); +DO_API( int, il2cpp_array_element_size, (const Il2CppClass* array_class) ); + +// assembly +DO_API( const Il2CppImage*, il2cpp_assembly_get_image, (const Il2CppAssembly *assembly) ); + +// class +DO_API( const Il2CppType*, il2cpp_class_enum_basetype, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_generic, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_inflated, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_assignable_from, (Il2CppClass *klass, Il2CppClass *oklass) ); +DO_API( bool, il2cpp_class_is_subclass_of, (Il2CppClass *klass, Il2CppClass *klassc, bool check_interfaces) ); +DO_API( bool, il2cpp_class_has_parent, (Il2CppClass* klass, Il2CppClass* klassc) ); +DO_API( Il2CppClass*, il2cpp_class_from_il2cpp_type, (const Il2CppType* type) ); +DO_API( Il2CppClass*, il2cpp_class_from_name, (const Il2CppImage* image, const char* namespaze, const char *name) ); +DO_API( Il2CppClass*, il2cpp_class_from_system_type, (Il2CppReflectionType *type) ); +DO_API( Il2CppClass*, il2cpp_class_get_element_class, (Il2CppClass *klass) ); +DO_API( const EventInfo*, il2cpp_class_get_events, (Il2CppClass *klass, void* *iter)); +DO_API( FieldInfo*, il2cpp_class_get_fields, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_nested_types, (Il2CppClass *klass, void* *iter) ); +DO_API( Il2CppClass*, il2cpp_class_get_interfaces, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_properties, (Il2CppClass *klass, void* *iter) ); +DO_API( const PropertyInfo*, il2cpp_class_get_property_from_name, (Il2CppClass *klass, const char *name) ); +DO_API( FieldInfo*, 
il2cpp_class_get_field_from_name, (Il2CppClass* klass, const char *name) ); +DO_API( const MethodInfo*, il2cpp_class_get_methods, (Il2CppClass *klass, void* *iter) ); +DO_API( const MethodInfo*, il2cpp_class_get_method_from_name, (Il2CppClass *klass, const char* name, int argsCount) ); +DO_API( const char*, il2cpp_class_get_name, (Il2CppClass *klass) ); +DO_API( const char*, il2cpp_class_get_namespace, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_parent, (Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_get_declaring_type, (Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_instance_size, (Il2CppClass *klass) ); +DO_API( size_t, il2cpp_class_num_fields, (const Il2CppClass* enumKlass) ); +DO_API( bool, il2cpp_class_is_valuetype, (const Il2CppClass *klass) ); +DO_API( int32_t, il2cpp_class_value_size, (Il2CppClass *klass, uint32_t *align) ); +DO_API( int, il2cpp_class_get_flags, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_abstract, (const Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_interface, (const Il2CppClass *klass) ); +DO_API( int, il2cpp_class_array_element_size, (const Il2CppClass *klass) ); +DO_API( Il2CppClass*, il2cpp_class_from_type, (const Il2CppType *type) ); +DO_API( const Il2CppType*, il2cpp_class_get_type, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_has_attribute, (Il2CppClass *klass, Il2CppClass *attr_class) ); +DO_API( bool, il2cpp_class_has_references, (Il2CppClass *klass) ); +DO_API( bool, il2cpp_class_is_enum, (const Il2CppClass *klass) ); +DO_API( const Il2CppImage*, il2cpp_class_get_image, (Il2CppClass* klass) ); +DO_API( const char*, il2cpp_class_get_assemblyname, (const Il2CppClass *klass) ); + +// testing only +DO_API( size_t, il2cpp_class_get_bitmap_size, (const Il2CppClass *klass) ); +DO_API( void, il2cpp_class_get_bitmap, (Il2CppClass *klass, size_t* bitmap) ); + +// stats +DO_API( bool, il2cpp_stats_dump_to_file, (const char *path) ); +DO_API( uint64_t, 
il2cpp_stats_get_value, (Il2CppStat stat) ); + +// domain +DO_API( Il2CppDomain*, il2cpp_domain_get, () ); +DO_API( const Il2CppAssembly*, il2cpp_domain_assembly_open, (Il2CppDomain* domain, const char* name) ); +DO_API( const Il2CppAssembly**, il2cpp_domain_get_assemblies, (const Il2CppDomain* domain, size_t* size) ); + +// exception +DO_API( void, il2cpp_raise_exception, (Il2CppException*) ); +DO_API( Il2CppException*, il2cpp_exception_from_name_msg, (const Il2CppImage* image, const char *name_space, const char *name, const char *msg) ); +DO_API( Il2CppException*, il2cpp_get_exception_argument_null, (const char *arg) ); +DO_API( void, il2cpp_format_exception, (const Il2CppException* ex, char* message, int message_size) ); +DO_API( void, il2cpp_format_stack_trace, (const Il2CppException* ex, char* output, int output_size) ); +DO_API( void, il2cpp_unhandled_exception, (Il2CppException*) ); + +// field +DO_API( int, il2cpp_field_get_flags, (FieldInfo *field) ); +DO_API( const char*, il2cpp_field_get_name, (FieldInfo *field) ); +DO_API( Il2CppClass*, il2cpp_field_get_parent, (FieldInfo *field) ); +DO_API( size_t, il2cpp_field_get_offset, (FieldInfo *field) ); +DO_API( const Il2CppType*, il2cpp_field_get_type, (FieldInfo *field) ); +DO_API( void, il2cpp_field_get_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( Il2CppObject*, il2cpp_field_get_value_object, (FieldInfo *field, Il2CppObject *obj) ); +DO_API( bool, il2cpp_field_has_attribute, (FieldInfo *field, Il2CppClass *attr_class) ); +DO_API( void, il2cpp_field_set_value, (Il2CppObject *obj, FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_get_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_static_set_value, (FieldInfo *field, void *value) ); +DO_API( void, il2cpp_field_set_value_object, (Il2CppObject *instance, FieldInfo *field, Il2CppObject *value)); + +// gc +DO_API( void, il2cpp_gc_collect, (int maxGenerations) ); +DO_API( int32_t, 
il2cpp_gc_collect_a_little, ()); +DO_API( void, il2cpp_gc_disable, ()); +DO_API( void, il2cpp_gc_enable, () ); +DO_API( int64_t, il2cpp_gc_get_used_size, () ); +DO_API( int64_t, il2cpp_gc_get_heap_size, () ); + +// gchandle +DO_API( uint32_t, il2cpp_gchandle_new, (Il2CppObject *obj, bool pinned) ); +DO_API( uint32_t, il2cpp_gchandle_new_weakref, (Il2CppObject *obj, bool track_resurrection) ); +DO_API( Il2CppObject*, il2cpp_gchandle_get_target , (uint32_t gchandle) ); +DO_API( void, il2cpp_gchandle_free, (uint32_t gchandle) ); + +// liveness +DO_API( void*, il2cpp_unity_liveness_calculation_begin, (Il2CppClass* filter, int max_object_count, il2cpp_register_object_callback callback, void* userdata, il2cpp_WorldChangedCallback onWorldStarted, il2cpp_WorldChangedCallback onWorldStopped) ); +DO_API( void, il2cpp_unity_liveness_calculation_end, (void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_root, (Il2CppObject* root, void* state) ); +DO_API( void, il2cpp_unity_liveness_calculation_from_statics, (void* state) ); + +// method +DO_API( const Il2CppType*, il2cpp_method_get_return_type, (const MethodInfo* method) ); +DO_API( Il2CppClass*, il2cpp_method_get_declaring_type, (const MethodInfo* method) ); +DO_API( const char*, il2cpp_method_get_name, (const MethodInfo *method) ); +DO_API( Il2CppReflectionMethod*, il2cpp_method_get_object, (const MethodInfo *method, Il2CppClass *refclass) ); +DO_API( bool, il2cpp_method_is_generic, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_inflated, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_is_instance, (const MethodInfo *method) ); +DO_API( uint32_t, il2cpp_method_get_param_count, (const MethodInfo *method) ); +DO_API( const Il2CppType*, il2cpp_method_get_param, (const MethodInfo *method, uint32_t index) ); +DO_API( Il2CppClass*, il2cpp_method_get_class, (const MethodInfo *method) ); +DO_API( bool, il2cpp_method_has_attribute, (const MethodInfo *method, Il2CppClass *attr_class) ); 
+DO_API( uint32_t, il2cpp_method_get_flags, (const MethodInfo *method, uint32_t *iflags) ); +DO_API( uint32_t, il2cpp_method_get_token, (const MethodInfo *method) ); +DO_API( const char*, il2cpp_method_get_param_name, (const MethodInfo *method, uint32_t index) ); + +// profiler +#if IL2CPP_ENABLE_PROFILER + +DO_API( void, il2cpp_profiler_install, (Il2CppProfiler *prof, Il2CppProfileFunc shutdown_callback) ); +DO_API( void, il2cpp_profiler_set_events, (Il2CppProfileFlags events) ); +DO_API( void, il2cpp_profiler_install_enter_leave, (Il2CppProfileMethodFunc enter, Il2CppProfileMethodFunc fleave) ); +DO_API( void, il2cpp_profiler_install_allocation, (Il2CppProfileAllocFunc callback) ); +DO_API( void, il2cpp_profiler_install_gc, (Il2CppProfileGCFunc callback, Il2CppProfileGCResizeFunc heap_resize_callback) ); + +#endif + +// property +DO_API( uint32_t, il2cpp_property_get_flags, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_get_method, (PropertyInfo *prop) ); +DO_API( const MethodInfo*, il2cpp_property_get_set_method, (PropertyInfo *prop) ); +DO_API( const char*, il2cpp_property_get_name, (PropertyInfo *prop) ); +DO_API( Il2CppClass*, il2cpp_property_get_parent, (PropertyInfo *prop) ); + +// object +DO_API( Il2CppClass*, il2cpp_object_get_class, (Il2CppObject* obj) ); +DO_API( uint32_t, il2cpp_object_get_size, (Il2CppObject* obj) ); +DO_API( const MethodInfo*, il2cpp_object_get_virtual_method, (Il2CppObject *obj, const MethodInfo *method) ); +DO_API( Il2CppObject*, il2cpp_object_new, (const Il2CppClass *klass) ); +DO_API( void*, il2cpp_object_unbox, (Il2CppObject* obj) ); + +DO_API( Il2CppObject*, il2cpp_value_box, (Il2CppClass *klass, void* data) ); + +// monitor +DO_API( void, il2cpp_monitor_enter, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_enter, (Il2CppObject* obj, uint32_t timeout) ); +DO_API( void, il2cpp_monitor_exit, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_pulse, (Il2CppObject* obj) ); +DO_API( void, 
il2cpp_monitor_pulse_all, (Il2CppObject* obj) ); +DO_API( void, il2cpp_monitor_wait, (Il2CppObject* obj) ); +DO_API( bool, il2cpp_monitor_try_wait, (Il2CppObject* obj, uint32_t timeout) ); + +// runtime +DO_API( Il2CppObject*, il2cpp_runtime_invoke, (const MethodInfo *method, void *obj, void **params, Il2CppException **exc) ); +DO_API( Il2CppObject*, il2cpp_runtime_invoke_convert_args, (const MethodInfo *method, void *obj, Il2CppObject **params, int paramCount, Il2CppException **exc) ); +DO_API( void, il2cpp_runtime_class_init, (Il2CppClass* klass) ); +DO_API( void, il2cpp_runtime_object_init, (Il2CppObject* obj) ); + +DO_API( void, il2cpp_runtime_object_init_exception, (Il2CppObject* obj, Il2CppException** exc) ); + +DO_API( void, il2cpp_runtime_unhandled_exception_policy_set, (Il2CppRuntimeUnhandledExceptionPolicy value) ); + +// string +DO_API( int32_t, il2cpp_string_length, (Il2CppString* str) ); +DO_API( Il2CppChar*, il2cpp_string_chars, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_new, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_new_len, (const char* str, uint32_t length) ); +DO_API( Il2CppString*, il2cpp_string_new_utf16, (const Il2CppChar *text, int32_t len) ); +DO_API( Il2CppString*, il2cpp_string_new_wrapper, (const char* str) ); +DO_API( Il2CppString*, il2cpp_string_intern, (Il2CppString* str) ); +DO_API( Il2CppString*, il2cpp_string_is_interned, (Il2CppString* str) ); + +// thread +DO_API( char*, il2cpp_thread_get_name, (Il2CppThread *thread, uint32_t *len) ); +DO_API( Il2CppThread*, il2cpp_thread_current, () ); +DO_API( Il2CppThread*, il2cpp_thread_attach, (Il2CppDomain *domain) ); +DO_API( void, il2cpp_thread_detach, (Il2CppThread *thread) ); + +DO_API( Il2CppThread**, il2cpp_thread_get_all_attached_threads, (size_t *size) ); +DO_API( bool, il2cpp_is_vm_thread, (Il2CppThread *thread) ); + +// stacktrace +DO_API( void, il2cpp_current_thread_walk_frame_stack, (Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( void, 
il2cpp_thread_walk_frame_stack, (Il2CppThread* thread, Il2CppFrameWalkFunc func, void* user_data) ); +DO_API( bool, il2cpp_current_thread_get_top_frame, (Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_top_frame, (Il2CppThread* thread, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_current_thread_get_frame_at, (int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( bool, il2cpp_thread_get_frame_at, (Il2CppThread* thread, int32_t offset, Il2CppStackFrameInfo& frame) ); +DO_API( int32_t, il2cpp_current_thread_get_stack_depth, () ); +DO_API( int32_t, il2cpp_thread_get_stack_depth, (Il2CppThread *thread) ); + +// type +DO_API( Il2CppObject*, il2cpp_type_get_object, (const Il2CppType *type) ); +DO_API( int, il2cpp_type_get_type, (const Il2CppType *type) ); +DO_API( Il2CppClass*, il2cpp_type_get_class_or_element_class, (const Il2CppType *type) ); +DO_API( char*, il2cpp_type_get_name, (const Il2CppType *type) ); + +// image +DO_API( const Il2CppAssembly*, il2cpp_image_get_assembly, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_name, (const Il2CppImage *image) ); +DO_API( const char*, il2cpp_image_get_filename, (const Il2CppImage *image) ); +DO_API( const MethodInfo*, il2cpp_image_get_entry_point, (const Il2CppImage* image) ); + +// Memory information +DO_API( Il2CppManagedMemorySnapshot*, il2cpp_capture_memory_snapshot, () ); +DO_API( void, il2cpp_free_captured_memory_snapshot, (Il2CppManagedMemorySnapshot* snapshot) ); + +DO_API(void, il2cpp_set_find_plugin_callback, (Il2CppSetFindPlugInCallback method)); + +// Logging +DO_API(void, il2cpp_register_log_callback, (Il2CppLogCallback method)); + +#if IL2CPP_DEBUGGER_ENABLED +// debug +DO_API( const Il2CppDebugTypeInfo*, il2cpp_debug_get_class_info, (const Il2CppClass *klass) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_class_get_document, (const Il2CppDebugTypeInfo* info) ); +DO_API( const char*, il2cpp_debug_document_get_filename, (const Il2CppDebugDocument* 
document) ); +DO_API( const char*, il2cpp_debug_document_get_directory, (const Il2CppDebugDocument* document) ); +DO_API( const Il2CppDebugMethodInfo*, il2cpp_debug_get_method_info, (const MethodInfo *method) ); +DO_API( const Il2CppDebugDocument*, il2cpp_debug_method_get_document, (const Il2CppDebugMethodInfo* info) ); +DO_API( const int32_t*, il2cpp_debug_method_get_offset_table, (const Il2CppDebugMethodInfo* info) ); +DO_API( size_t, il2cpp_debug_method_get_code_size, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_update_frame_il_offset, (int32_t il_offset) ); +DO_API( const Il2CppDebugLocalsInfo**, il2cpp_debug_method_get_locals_info, (const Il2CppDebugMethodInfo* info) ); +DO_API( const Il2CppClass*, il2cpp_debug_local_get_type, (const Il2CppDebugLocalsInfo *info) ); +DO_API( const char*, il2cpp_debug_local_get_name, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_start_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( uint32_t, il2cpp_debug_local_get_end_offset, (const Il2CppDebugLocalsInfo *info) ); +DO_API( Il2CppObject*, il2cpp_debug_method_get_param_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( Il2CppObject*, il2cpp_debug_frame_get_local_value, (const Il2CppStackFrameInfo *info, uint32_t position) ); +DO_API( void*, il2cpp_debug_method_get_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, int64_t uid, int32_t offset) ); +DO_API( void, il2cpp_debug_method_set_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location, void *data) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data, (const Il2CppDebugMethodInfo* info) ); +DO_API( void, il2cpp_debug_method_clear_breakpoint_data_at, (const Il2CppDebugMethodInfo* info, uint64_t location) ); +#endif diff --git a/module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-class.h b/module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-class.h new file mode 100644 index 00000000..ed3b22ad --- /dev/null +++ 
b/module/src/main/cpp/il2cppapi/5.6.0f3/il2cpp-class.h @@ -0,0 +1,1013 @@ +typedef uint32_t Il2CppMethodSlot; +const int ipv6AddressSize = 16; +typedef int32_t il2cpp_hresult_t; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppType Il2CppType; +typedef struct EventInfo EventInfo; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct PropertyInfo PropertyInfo; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppArray Il2CppArray; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppDomain Il2CppDomain; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppException Il2CppException; +typedef struct Il2CppProfiler Il2CppProfiler; +typedef struct Il2CppObject Il2CppObject; +typedef struct Il2CppReflectionMethod Il2CppReflectionMethod; +typedef struct Il2CppReflectionType Il2CppReflectionType; +typedef struct Il2CppString Il2CppString; +typedef struct Il2CppThread Il2CppThread; +typedef struct Il2CppAsyncResult Il2CppAsyncResult; +typedef enum Il2CppProfileFlags +{ + IL2CPP_PROFILE_NONE = 0, + IL2CPP_PROFILE_APPDOMAIN_EVENTS = 1 << 0, + IL2CPP_PROFILE_ASSEMBLY_EVENTS = 1 << 1, + IL2CPP_PROFILE_MODULE_EVENTS = 1 << 2, + IL2CPP_PROFILE_CLASS_EVENTS = 1 << 3, + IL2CPP_PROFILE_JIT_COMPILATION = 1 << 4, + IL2CPP_PROFILE_INLINING = 1 << 5, + IL2CPP_PROFILE_EXCEPTIONS = 1 << 6, + IL2CPP_PROFILE_ALLOCATIONS = 1 << 7, + IL2CPP_PROFILE_GC = 1 << 8, + IL2CPP_PROFILE_THREADS = 1 << 9, + IL2CPP_PROFILE_REMOTING = 1 << 10, + IL2CPP_PROFILE_TRANSITIONS = 1 << 11, + IL2CPP_PROFILE_ENTER_LEAVE = 1 << 12, + IL2CPP_PROFILE_COVERAGE = 1 << 13, + IL2CPP_PROFILE_INS_COVERAGE = 1 << 14, + IL2CPP_PROFILE_STATISTICAL = 1 << 15, + IL2CPP_PROFILE_METHOD_EVENTS = 1 << 16, + IL2CPP_PROFILE_MONITOR_EVENTS = 1 << 17, + IL2CPP_PROFILE_IOMAP_EVENTS = 1 << 18, + IL2CPP_PROFILE_GC_MOVES = 1 << 19 +} Il2CppProfileFlags; +typedef enum Il2CppGCEvent +{ + IL2CPP_GC_EVENT_START, + IL2CPP_GC_EVENT_MARK_START, + 
IL2CPP_GC_EVENT_MARK_END, + IL2CPP_GC_EVENT_RECLAIM_START, + IL2CPP_GC_EVENT_RECLAIM_END, + IL2CPP_GC_EVENT_END, + IL2CPP_GC_EVENT_PRE_STOP_WORLD, + IL2CPP_GC_EVENT_POST_STOP_WORLD, + IL2CPP_GC_EVENT_PRE_START_WORLD, + IL2CPP_GC_EVENT_POST_START_WORLD +} Il2CppGCEvent; +typedef enum Il2CppStat +{ + IL2CPP_STAT_NEW_OBJECT_COUNT, + IL2CPP_STAT_INITIALIZED_CLASS_COUNT, + IL2CPP_STAT_METHOD_COUNT, + IL2CPP_STAT_CLASS_STATIC_DATA_SIZE, + IL2CPP_STAT_GENERIC_INSTANCE_COUNT, + IL2CPP_STAT_GENERIC_CLASS_COUNT, + IL2CPP_STAT_INFLATED_METHOD_COUNT, + IL2CPP_STAT_INFLATED_TYPE_COUNT, +} Il2CppStat; +typedef enum StackFrameType +{ + FRAME_TYPE_MANAGED = 0, + FRAME_TYPE_DEBUGGER_INVOKE = 1, + FRAME_TYPE_MANAGED_TO_NATIVE = 2, + FRAME_TYPE_SENTINEL = 3 +} StackFrameType; +typedef enum Il2CppRuntimeUnhandledExceptionPolicy +{ + IL2CPP_UNHANDLED_POLICY_LEGACY, + IL2CPP_UNHANDLED_POLICY_CURRENT +} Il2CppRuntimeUnhandledExceptionPolicy; +typedef struct Il2CppStackFrameInfo +{ + const MethodInfo *method; +} Il2CppStackFrameInfo; +typedef struct { + void* (*malloc_func)(size_t size); + void* (*aligned_malloc_func)(size_t size, size_t alignment); + void (*free_func)(void *ptr); + void (*aligned_free_func)(void *ptr); + void* (*calloc_func)(size_t nmemb, size_t size); + void* (*realloc_func)(void *ptr, size_t size); + void* (*aligned_realloc_func)(void *ptr, size_t size, size_t alignment); +} Il2CppMemoryCallbacks; +typedef uint16_t Il2CppChar; +typedef char Il2CppNativeChar; +typedef void (*il2cpp_register_object_callback)(Il2CppObject** arr, int size, void* userdata); +typedef void (*il2cpp_WorldChangedCallback)(); +typedef void (*Il2CppFrameWalkFunc) (const Il2CppStackFrameInfo *info, void *user_data); +typedef void (*Il2CppProfileFunc) (Il2CppProfiler* prof); +typedef void (*Il2CppProfileMethodFunc) (Il2CppProfiler* prof, const MethodInfo *method); +typedef void (*Il2CppProfileAllocFunc) (Il2CppProfiler* prof, Il2CppObject *obj, Il2CppClass *klass); +typedef void 
(*Il2CppProfileGCFunc) (Il2CppProfiler* prof, Il2CppGCEvent event, int generation); +typedef void (*Il2CppProfileGCResizeFunc) (Il2CppProfiler* prof, int64_t new_size); +typedef const Il2CppNativeChar* (*Il2CppSetFindPlugInCallback)(const Il2CppNativeChar*); +typedef void (*Il2CppLogCallback)(const char*); +typedef struct Il2CppManagedMemorySnapshot Il2CppManagedMemorySnapshot; +typedef void (*Il2CppMethodPointer)(); +typedef int32_t il2cpp_array_size_t; +typedef enum Il2CppTypeEnum +{ + IL2CPP_TYPE_END = 0x00, + IL2CPP_TYPE_VOID = 0x01, + IL2CPP_TYPE_BOOLEAN = 0x02, + IL2CPP_TYPE_CHAR = 0x03, + IL2CPP_TYPE_I1 = 0x04, + IL2CPP_TYPE_U1 = 0x05, + IL2CPP_TYPE_I2 = 0x06, + IL2CPP_TYPE_U2 = 0x07, + IL2CPP_TYPE_I4 = 0x08, + IL2CPP_TYPE_U4 = 0x09, + IL2CPP_TYPE_I8 = 0x0a, + IL2CPP_TYPE_U8 = 0x0b, + IL2CPP_TYPE_R4 = 0x0c, + IL2CPP_TYPE_R8 = 0x0d, + IL2CPP_TYPE_STRING = 0x0e, + IL2CPP_TYPE_PTR = 0x0f, + IL2CPP_TYPE_BYREF = 0x10, + IL2CPP_TYPE_VALUETYPE = 0x11, + IL2CPP_TYPE_CLASS = 0x12, + IL2CPP_TYPE_VAR = 0x13, + IL2CPP_TYPE_ARRAY = 0x14, + IL2CPP_TYPE_GENERICINST= 0x15, + IL2CPP_TYPE_TYPEDBYREF = 0x16, + IL2CPP_TYPE_I = 0x18, + IL2CPP_TYPE_U = 0x19, + IL2CPP_TYPE_FNPTR = 0x1b, + IL2CPP_TYPE_OBJECT = 0x1c, + IL2CPP_TYPE_SZARRAY = 0x1d, + IL2CPP_TYPE_MVAR = 0x1e, + IL2CPP_TYPE_CMOD_REQD = 0x1f, + IL2CPP_TYPE_CMOD_OPT = 0x20, + IL2CPP_TYPE_INTERNAL = 0x21, + IL2CPP_TYPE_MODIFIER = 0x40, + IL2CPP_TYPE_SENTINEL = 0x41, + IL2CPP_TYPE_PINNED = 0x45, + IL2CPP_TYPE_ENUM = 0x55 +} Il2CppTypeEnum; +typedef int32_t TypeIndex; +typedef int32_t TypeDefinitionIndex; +typedef int32_t FieldIndex; +typedef int32_t DefaultValueIndex; +typedef int32_t DefaultValueDataIndex; +typedef int32_t CustomAttributeIndex; +typedef int32_t ParameterIndex; +typedef int32_t MethodIndex; +typedef int32_t GenericMethodIndex; +typedef int32_t PropertyIndex; +typedef int32_t EventIndex; +typedef int32_t GenericContainerIndex; +typedef int32_t GenericParameterIndex; +typedef int16_t 
GenericParameterConstraintIndex; +typedef int32_t NestedTypeIndex; +typedef int32_t InterfacesIndex; +typedef int32_t VTableIndex; +typedef int32_t InterfaceOffsetIndex; +typedef int32_t RGCTXIndex; +typedef int32_t StringIndex; +typedef int32_t StringLiteralIndex; +typedef int32_t GenericInstIndex; +typedef int32_t ImageIndex; +typedef int32_t AssemblyIndex; +typedef int32_t InteropDataIndex; +const TypeIndex kTypeIndexInvalid = -1; +const TypeDefinitionIndex kTypeDefinitionIndexInvalid = -1; +const DefaultValueDataIndex kDefaultValueIndexNull = -1; +const EventIndex kEventIndexInvalid = -1; +const FieldIndex kFieldIndexInvalid = -1; +const MethodIndex kMethodIndexInvalid = -1; +const PropertyIndex kPropertyIndexInvalid = -1; +const GenericContainerIndex kGenericContainerIndexInvalid = -1; +const GenericParameterIndex kGenericParameterIndexInvalid = -1; +const RGCTXIndex kRGCTXIndexInvalid = -1; +const StringLiteralIndex kStringLiteralIndexInvalid = -1; +const InteropDataIndex kInteropDataIndexInvalid = -1; +typedef uint32_t EncodedMethodIndex; +typedef enum Il2CppMetadataUsage +{ + kIl2CppMetadataUsageInvalid, + kIl2CppMetadataUsageTypeInfo, + kIl2CppMetadataUsageIl2CppType, + kIl2CppMetadataUsageMethodDef, + kIl2CppMetadataUsageFieldInfo, + kIl2CppMetadataUsageStringLiteral, + kIl2CppMetadataUsageMethodRef, +} Il2CppMetadataUsage; +static inline Il2CppMetadataUsage GetEncodedIndexType (EncodedMethodIndex index) +{ + return (Il2CppMetadataUsage)((index & 0xE0000000) >> 29); +} +static inline uint32_t GetDecodedMethodIndex (EncodedMethodIndex index) +{ + return index & 0x1FFFFFFFU; +} +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppTypeDefinitionMetadata Il2CppTypeDefinitionMetadata; +typedef union Il2CppRGCTXDefinitionData +{ + int32_t rgctxDataDummy; + MethodIndex methodIndex; + TypeIndex typeIndex; +} Il2CppRGCTXDefinitionData; +typedef enum Il2CppRGCTXDataType +{ + IL2CPP_RGCTX_DATA_INVALID, + 
IL2CPP_RGCTX_DATA_TYPE, + IL2CPP_RGCTX_DATA_CLASS, + IL2CPP_RGCTX_DATA_METHOD +} Il2CppRGCTXDataType; +typedef struct Il2CppRGCTXDefinition +{ + Il2CppRGCTXDataType type; + Il2CppRGCTXDefinitionData data; +} Il2CppRGCTXDefinition; +typedef struct Il2CppInterfaceOffsetPair +{ + TypeIndex interfaceTypeIndex; + int32_t offset; +} Il2CppInterfaceOffsetPair; +typedef struct Il2CppTypeDefinition +{ + StringIndex nameIndex; + StringIndex namespaceIndex; + CustomAttributeIndex customAttributeIndex; + TypeIndex byvalTypeIndex; + TypeIndex byrefTypeIndex; + TypeIndex declaringTypeIndex; + TypeIndex parentIndex; + TypeIndex elementTypeIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + GenericContainerIndex genericContainerIndex; + uint32_t flags; + FieldIndex fieldStart; + MethodIndex methodStart; + EventIndex eventStart; + PropertyIndex propertyStart; + NestedTypeIndex nestedTypesStart; + InterfacesIndex interfacesStart; + VTableIndex vtableStart; + InterfacesIndex interfaceOffsetsStart; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint32_t bitfield; + uint32_t token; +} Il2CppTypeDefinition; +typedef struct Il2CppFieldDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppFieldDefinition; +typedef struct Il2CppFieldDefaultValue +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppFieldDefaultValue; +typedef struct Il2CppFieldMarshaledSize +{ + FieldIndex fieldIndex; + TypeIndex typeIndex; + int32_t size; +} Il2CppFieldMarshaledSize; +typedef struct Il2CppFieldRef +{ + TypeIndex typeIndex; + FieldIndex fieldIndex; +} Il2CppFieldRef; +typedef struct Il2CppParameterDefinition +{ + StringIndex nameIndex; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + 
TypeIndex typeIndex; +} Il2CppParameterDefinition; +typedef struct Il2CppParameterDefaultValue +{ + ParameterIndex parameterIndex; + TypeIndex typeIndex; + DefaultValueDataIndex dataIndex; +} Il2CppParameterDefaultValue; +typedef struct Il2CppMethodDefinition +{ + StringIndex nameIndex; + TypeDefinitionIndex declaringType; + TypeIndex returnType; + ParameterIndex parameterStart; + CustomAttributeIndex customAttributeIndex; + GenericContainerIndex genericContainerIndex; + MethodIndex methodIndex; + MethodIndex invokerIndex; + MethodIndex reversePInvokeWrapperIndex; + RGCTXIndex rgctxStartIndex; + int32_t rgctxCount; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint16_t parameterCount; +} Il2CppMethodDefinition; +typedef struct Il2CppEventDefinition +{ + StringIndex nameIndex; + TypeIndex typeIndex; + MethodIndex add; + MethodIndex remove; + MethodIndex raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppEventDefinition; +typedef struct Il2CppPropertyDefinition +{ + StringIndex nameIndex; + MethodIndex get; + MethodIndex set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} Il2CppPropertyDefinition; +typedef struct Il2CppMethodSpec +{ + MethodIndex methodDefinitionIndex; + GenericInstIndex classIndexIndex; + GenericInstIndex methodIndexIndex; +} Il2CppMethodSpec; +typedef struct Il2CppStringLiteral +{ + uint32_t length; + StringLiteralIndex dataIndex; +} Il2CppStringLiteral; +typedef struct Il2CppGenericMethodIndices +{ + MethodIndex methodIndex; + MethodIndex invokerIndex; +} Il2CppGenericMethodIndices; +typedef struct Il2CppGenericMethodFunctionsDefinitions +{ + GenericMethodIndex genericMethodIndex; + Il2CppGenericMethodIndices indices; +} Il2CppGenericMethodFunctionsDefinitions; +const int kPublicKeyByteLength = 8; +typedef struct Il2CppAssemblyName +{ + StringIndex nameIndex; + StringIndex cultureIndex; + StringIndex hashValueIndex; + StringIndex publicKeyIndex; + uint32_t 
hash_alg; + int32_t hash_len; + uint32_t flags; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; + uint8_t publicKeyToken[8]; +} Il2CppAssemblyName; +typedef struct Il2CppImageDefinition +{ + StringIndex nameIndex; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + uint32_t token; +} Il2CppImageDefinition; +typedef struct Il2CppAssembly +{ + ImageIndex imageIndex; + CustomAttributeIndex customAttributeIndex; + int32_t referencedAssemblyStart; + int32_t referencedAssemblyCount; + Il2CppAssemblyName aname; +} Il2CppAssembly; +typedef struct Il2CppMetadataUsageList +{ + uint32_t start; + uint32_t count; +} Il2CppMetadataUsageList; +typedef struct Il2CppMetadataUsagePair +{ + uint32_t destinationIndex; + uint32_t encodedSourceIndex; +} Il2CppMetadataUsagePair; +typedef struct Il2CppCustomAttributeTypeRange +{ + int32_t start; + int32_t count; +} Il2CppCustomAttributeTypeRange; +typedef struct Il2CppRange +{ + int32_t start; + int32_t length; +} Il2CppRange; +typedef struct Il2CppWindowsRuntimeTypeNamePair +{ + StringIndex nameIndex; + TypeIndex typeIndex; +} Il2CppWindowsRuntimeTypeNamePair; +#pragma pack(push, p1,4) +typedef struct Il2CppGlobalMetadataHeader +{ + int32_t sanity; + int32_t version; + int32_t stringLiteralOffset; + int32_t stringLiteralCount; + int32_t stringLiteralDataOffset; + int32_t stringLiteralDataCount; + int32_t stringOffset; + int32_t stringCount; + int32_t eventsOffset; + int32_t eventsCount; + int32_t propertiesOffset; + int32_t propertiesCount; + int32_t methodsOffset; + int32_t methodsCount; + int32_t parameterDefaultValuesOffset; + int32_t parameterDefaultValuesCount; + int32_t fieldDefaultValuesOffset; + int32_t fieldDefaultValuesCount; + int32_t fieldAndParameterDefaultValueDataOffset; + int32_t fieldAndParameterDefaultValueDataCount; + int32_t fieldMarshaledSizesOffset; + int32_t fieldMarshaledSizesCount; + int32_t parametersOffset; + int32_t 
parametersCount; + int32_t fieldsOffset; + int32_t fieldsCount; + int32_t genericParametersOffset; + int32_t genericParametersCount; + int32_t genericParameterConstraintsOffset; + int32_t genericParameterConstraintsCount; + int32_t genericContainersOffset; + int32_t genericContainersCount; + int32_t nestedTypesOffset; + int32_t nestedTypesCount; + int32_t interfacesOffset; + int32_t interfacesCount; + int32_t vtableMethodsOffset; + int32_t vtableMethodsCount; + int32_t interfaceOffsetsOffset; + int32_t interfaceOffsetsCount; + int32_t typeDefinitionsOffset; + int32_t typeDefinitionsCount; + int32_t rgctxEntriesOffset; + int32_t rgctxEntriesCount; + int32_t imagesOffset; + int32_t imagesCount; + int32_t assembliesOffset; + int32_t assembliesCount; + int32_t metadataUsageListsOffset; + int32_t metadataUsageListsCount; + int32_t metadataUsagePairsOffset; + int32_t metadataUsagePairsCount; + int32_t fieldRefsOffset; + int32_t fieldRefsCount; + int32_t referencedAssembliesOffset; + int32_t referencedAssembliesCount; + int32_t attributesInfoOffset; + int32_t attributesInfoCount; + int32_t attributeTypesOffset; + int32_t attributeTypesCount; + int32_t unresolvedVirtualCallParameterTypesOffset; + int32_t unresolvedVirtualCallParameterTypesCount; + int32_t unresolvedVirtualCallParameterRangesOffset; + int32_t unresolvedVirtualCallParameterRangesCount; + int32_t windowsRuntimeTypeNamesOffset; + int32_t windowsRuntimeTypeNamesSize; +} Il2CppGlobalMetadataHeader; +#pragma pack(pop, p1) +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct Il2CppType Il2CppType; +typedef struct Il2CppArrayType +{ + const Il2CppType* etype; + uint8_t rank; + uint8_t numsizes; + uint8_t numlobounds; + int *sizes; + int *lobounds; +} Il2CppArrayType; +typedef struct Il2CppGenericInst +{ + uint32_t type_argc; + const Il2CppType **type_argv; +} Il2CppGenericInst; +typedef struct Il2CppGenericContext +{ + const Il2CppGenericInst *class_inst; + const 
Il2CppGenericInst *method_inst; +} Il2CppGenericContext; +typedef struct Il2CppGenericParameter +{ + GenericContainerIndex ownerIndex; + StringIndex nameIndex; + GenericParameterConstraintIndex constraintsStart; + int16_t constraintsCount; + uint16_t num; + uint16_t flags; +} Il2CppGenericParameter; +typedef struct Il2CppGenericContainer +{ + int32_t ownerIndex; + int32_t type_argc; + int32_t is_method; + GenericParameterIndex genericParameterStart; +} Il2CppGenericContainer; +typedef struct Il2CppGenericClass +{ + TypeDefinitionIndex typeDefinitionIndex; + Il2CppGenericContext context; + Il2CppClass *cached_class; +} Il2CppGenericClass; +typedef struct Il2CppGenericMethod +{ + const MethodInfo* methodDefinition; + Il2CppGenericContext context; +} Il2CppGenericMethod; +typedef struct Il2CppType +{ + union { + void* dummy; + TypeDefinitionIndex klassIndex; + const Il2CppType *type; + Il2CppArrayType *array; + GenericParameterIndex genericParameterIndex; + Il2CppGenericClass *generic_class; + } data; + unsigned int attrs : 16; + Il2CppTypeEnum type : 8; + unsigned int num_mods : 6; + unsigned int byref : 1; + unsigned int pinned : 1; +} Il2CppType; +typedef enum { + IL2CPP_CALL_DEFAULT, + IL2CPP_CALL_C, + IL2CPP_CALL_STDCALL, + IL2CPP_CALL_THISCALL, + IL2CPP_CALL_FASTCALL, + IL2CPP_CALL_VARARG +} Il2CppCallConvention; +typedef enum Il2CppCharSet +{ + CHARSET_ANSI, + CHARSET_UNICODE +} Il2CppCharSet; +typedef struct Il2CppClass Il2CppClass; +typedef struct Il2CppGuid Il2CppGuid; +typedef struct Il2CppImage Il2CppImage; +typedef struct Il2CppAssembly Il2CppAssembly; +typedef struct Il2CppAppDomain Il2CppAppDomain; +typedef struct Il2CppAppDomainSetup Il2CppAppDomainSetup; +typedef struct Il2CppDelegate Il2CppDelegate; +typedef struct Il2CppAppContext Il2CppAppContext; +typedef struct Il2CppNameToTypeDefinitionIndexHashTable Il2CppNameToTypeDefinitionIndexHashTable; +typedef struct VirtualInvokeData +{ + Il2CppMethodPointer methodPtr; + const MethodInfo* method; +} 
VirtualInvokeData; +typedef enum Il2CppTypeNameFormat +{ + IL2CPP_TYPE_NAME_FORMAT_IL, + IL2CPP_TYPE_NAME_FORMAT_REFLECTION, + IL2CPP_TYPE_NAME_FORMAT_FULL_NAME, + IL2CPP_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED +} Il2CppTypeNameFormat; +extern bool g_il2cpp_is_fully_initialized; +typedef struct { + Il2CppImage *corlib; + Il2CppClass *object_class; + Il2CppClass *byte_class; + Il2CppClass *void_class; + Il2CppClass *boolean_class; + Il2CppClass *sbyte_class; + Il2CppClass *int16_class; + Il2CppClass *uint16_class; + Il2CppClass *int32_class; + Il2CppClass *uint32_class; + Il2CppClass *int_class; + Il2CppClass *uint_class; + Il2CppClass *int64_class; + Il2CppClass *uint64_class; + Il2CppClass *single_class; + Il2CppClass *double_class; + Il2CppClass *char_class; + Il2CppClass *string_class; + Il2CppClass *enum_class; + Il2CppClass *array_class; + Il2CppClass *delegate_class; + Il2CppClass *multicastdelegate_class; + Il2CppClass *asyncresult_class; + Il2CppClass *manualresetevent_class; + Il2CppClass *typehandle_class; + Il2CppClass *fieldhandle_class; + Il2CppClass *methodhandle_class; + Il2CppClass *systemtype_class; + Il2CppClass *monotype_class; + Il2CppClass *exception_class; + Il2CppClass *threadabortexception_class; + Il2CppClass *thread_class; + Il2CppClass *appdomain_class; + Il2CppClass *appdomain_setup_class; + Il2CppClass *field_info_class; + Il2CppClass *method_info_class; + Il2CppClass *property_info_class; + Il2CppClass *event_info_class; + Il2CppClass *mono_event_info_class; + Il2CppClass *stringbuilder_class; + Il2CppClass *stack_frame_class; + Il2CppClass *stack_trace_class; + Il2CppClass *marshal_class; + Il2CppClass *typed_reference_class; + Il2CppClass *marshalbyrefobject_class; + Il2CppClass *generic_ilist_class; + Il2CppClass *generic_icollection_class; + Il2CppClass *generic_ienumerable_class; + Il2CppClass *generic_nullable_class; + Il2CppClass *il2cpp_com_object_class; + Il2CppClass *customattribute_data_class; + Il2CppClass *version; + 
Il2CppClass *culture_info; + Il2CppClass *async_call_class; + Il2CppClass *assembly_class; + Il2CppClass *assembly_name_class; + Il2CppClass *enum_info_class; + Il2CppClass *mono_field_class; + Il2CppClass *mono_method_class; + Il2CppClass *mono_method_info_class; + Il2CppClass *mono_property_info_class; + Il2CppClass *parameter_info_class; + Il2CppClass *module_class; + Il2CppClass *pointer_class; + Il2CppClass *system_exception_class; + Il2CppClass *argument_exception_class; + Il2CppClass *wait_handle_class; + Il2CppClass *safe_handle_class; + Il2CppClass *sort_key_class; + Il2CppClass *dbnull_class; + Il2CppClass *error_wrapper_class; + Il2CppClass *missing_class; + Il2CppClass *value_type_class; + Il2CppClass* ireference_class; + Il2CppClass* ikey_value_pair_class; + Il2CppClass* key_value_pair_class; +} Il2CppDefaults; +extern Il2CppDefaults il2cpp_defaults; +typedef struct Il2CppClass Il2CppClass; +typedef struct MethodInfo MethodInfo; +typedef struct FieldInfo FieldInfo; +typedef struct Il2CppObject Il2CppObject; +typedef struct MemberInfo MemberInfo; +typedef struct CustomAttributesCache +{ + int count; + Il2CppObject** attributes; +} CustomAttributesCache; +typedef struct CustomAttributeTypeCache +{ + int count; + Il2CppClass** attributeTypes; +} CustomAttributeTypeCache; +typedef void (*CustomAttributesCacheGenerator)(CustomAttributesCache*); +const int THREAD_STATIC_FIELD_OFFSET = -1; +typedef struct FieldInfo +{ + const char* name; + const Il2CppType* type; + Il2CppClass *parent; + int32_t offset; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} FieldInfo; +typedef struct PropertyInfo +{ + Il2CppClass *parent; + const char *name; + const MethodInfo *get; + const MethodInfo *set; + uint32_t attrs; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} PropertyInfo; +typedef struct EventInfo +{ + const char* name; + const Il2CppType* eventType; + Il2CppClass* parent; + const MethodInfo* add; + const MethodInfo* remove; + const 
MethodInfo* raise; + CustomAttributeIndex customAttributeIndex; + uint32_t token; +} EventInfo; +typedef struct ParameterInfo +{ + const char* name; + int32_t position; + uint32_t token; + CustomAttributeIndex customAttributeIndex; + const Il2CppType* parameter_type; +} ParameterInfo; +typedef void* (*InvokerMethod)(const MethodInfo*, void*, void**); +typedef union Il2CppRGCTXData +{ + void* rgctxDataDummy; + const MethodInfo* method; + const Il2CppType* type; + Il2CppClass* klass; +} Il2CppRGCTXData; +typedef struct MethodInfo +{ + Il2CppMethodPointer methodPointer; + InvokerMethod invoker_method; + const char* name; + Il2CppClass *declaring_type; + const Il2CppType *return_type; + const ParameterInfo* parameters; + union + { + const Il2CppRGCTXData* rgctx_data; + const Il2CppMethodDefinition* methodDefinition; + }; + union + { + const Il2CppGenericMethod* genericMethod; + const Il2CppGenericContainer* genericContainer; + }; + CustomAttributeIndex customAttributeIndex; + uint32_t token; + uint16_t flags; + uint16_t iflags; + uint16_t slot; + uint8_t parameters_count; + uint8_t is_generic : 1; + uint8_t is_inflated : 1; +} MethodInfo; +typedef struct Il2CppRuntimeInterfaceOffsetPair +{ + Il2CppClass* interfaceType; + int32_t offset; +} Il2CppRuntimeInterfaceOffsetPair; +typedef void (*PInvokeMarshalToNativeFunc)(void* managedStructure, void* marshaledStructure); +typedef void (*PInvokeMarshalFromNativeFunc)(void* marshaledStructure, void* managedStructure); +typedef void (*PInvokeMarshalCleanupFunc)(void* marshaledStructure); +typedef struct Il2CppIManagedObjectHolder* (*CreateCCWFunc)(Il2CppObject* obj); +typedef struct Il2CppInteropData +{ + Il2CppMethodPointer delegatePInvokeWrapperFunction; + PInvokeMarshalToNativeFunc pinvokeMarshalToNativeFunction; + PInvokeMarshalFromNativeFunc pinvokeMarshalFromNativeFunction; + PInvokeMarshalCleanupFunc pinvokeMarshalCleanupFunction; + CreateCCWFunc createCCWFunction; + const Il2CppGuid* guid; + const Il2CppType* type; +} 
Il2CppInteropData; +typedef struct Il2CppClass +{ + const Il2CppImage* image; + void* gc_desc; + const char* name; + const char* namespaze; + const Il2CppType* byval_arg; + const Il2CppType* this_arg; + Il2CppClass* element_class; + Il2CppClass* castClass; + Il2CppClass* declaringType; + Il2CppClass* parent; + Il2CppGenericClass *generic_class; + const Il2CppTypeDefinition* typeDefinition; + const Il2CppInteropData* interopData; + FieldInfo* fields; + const EventInfo* events; + const PropertyInfo* properties; + const MethodInfo** methods; + Il2CppClass** nestedTypes; + Il2CppClass** implementedInterfaces; + Il2CppRuntimeInterfaceOffsetPair* interfaceOffsets; + void* static_fields; + const Il2CppRGCTXData* rgctx_data; + Il2CppClass** typeHierarchy; + uint32_t cctor_started; + uint32_t cctor_finished; + __attribute__((aligned(8))) uint64_t cctor_thread; + GenericContainerIndex genericContainerIndex; + CustomAttributeIndex customAttributeIndex; + uint32_t instance_size; + uint32_t actualSize; + uint32_t element_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; + int32_t thread_static_fields_offset; + uint32_t flags; + uint32_t token; + uint16_t method_count; + uint16_t property_count; + uint16_t field_count; + uint16_t event_count; + uint16_t nested_type_count; + uint16_t vtable_count; + uint16_t interfaces_count; + uint16_t interface_offsets_count; + uint8_t typeHierarchyDepth; + uint8_t genericRecursionDepth; + uint8_t rank; + uint8_t minimumAlignment; + uint8_t packingSize; + uint8_t valuetype : 1; + uint8_t initialized : 1; + uint8_t enumtype : 1; + uint8_t is_generic : 1; + uint8_t has_references : 1; + uint8_t init_pending : 1; + uint8_t size_inited : 1; + uint8_t has_finalize : 1; + uint8_t has_cctor : 1; + uint8_t is_blittable : 1; + uint8_t is_import_or_windows_runtime : 1; + uint8_t is_vtable_initialized : 1; + VirtualInvokeData vtable[32]; +} Il2CppClass; +typedef struct Il2CppTypeDefinitionSizes +{ + uint32_t 
instance_size; + int32_t native_size; + uint32_t static_fields_size; + uint32_t thread_static_fields_size; +} Il2CppTypeDefinitionSizes; +typedef struct Il2CppDomain +{ + Il2CppAppDomain* domain; + Il2CppAppDomainSetup* setup; + Il2CppAppContext* default_context; + const char* friendly_name; + uint32_t domain_id; +} Il2CppDomain; +typedef struct Il2CppImage +{ + const char* name; + AssemblyIndex assemblyIndex; + TypeDefinitionIndex typeStart; + uint32_t typeCount; + MethodIndex entryPointIndex; + Il2CppNameToTypeDefinitionIndexHashTable* nameToClassHashTable; + uint32_t token; +} Il2CppImage; +typedef struct Il2CppCodeGenOptions +{ + bool enablePrimitiveValueTypeGenericSharing; +} Il2CppCodeGenOptions; +typedef struct Il2CppCodeRegistration +{ + uint32_t methodPointersCount; + const Il2CppMethodPointer* methodPointers; + uint32_t reversePInvokeWrapperCount; + const Il2CppMethodPointer* reversePInvokeWrappers; + uint32_t genericMethodPointersCount; + const Il2CppMethodPointer* genericMethodPointers; + uint32_t invokerPointersCount; + const InvokerMethod* invokerPointers; + CustomAttributeIndex customAttributeCount; + const CustomAttributesCacheGenerator* customAttributeGenerators; + uint32_t unresolvedVirtualCallCount; + const Il2CppMethodPointer* unresolvedVirtualCallPointers; + uint32_t interopDataCount; + Il2CppInteropData* interopData; +} Il2CppCodeRegistration; +typedef struct Il2CppMetadataRegistration +{ + int32_t genericClassesCount; + Il2CppGenericClass* const * genericClasses; + int32_t genericInstsCount; + const Il2CppGenericInst* const * genericInsts; + int32_t genericMethodTableCount; + const Il2CppGenericMethodFunctionsDefinitions* genericMethodTable; + int32_t typesCount; + const Il2CppType* const * types; + int32_t methodSpecsCount; + const Il2CppMethodSpec* methodSpecs; + FieldIndex fieldOffsetsCount; + const int32_t** fieldOffsets; + TypeDefinitionIndex typeDefinitionsSizesCount; + const Il2CppTypeDefinitionSizes** typeDefinitionsSizes; + const 
size_t metadataUsagesCount; + void** const* metadataUsages; +} Il2CppMetadataRegistration; +typedef struct Il2CppRuntimeStats +{ + uint64_t new_object_count; + uint64_t initialized_class_count; + uint64_t method_count; + uint64_t class_static_data_size; + uint64_t generic_instance_count; + uint64_t generic_class_count; + uint64_t inflated_method_count; + uint64_t inflated_type_count; + bool enabled; +} Il2CppRuntimeStats; +extern Il2CppRuntimeStats il2cpp_runtime_stats; +typedef struct Il2CppPerfCounters +{ + uint32_t jit_methods; + uint32_t jit_bytes; + uint32_t jit_time; + uint32_t jit_failures; + uint32_t exceptions_thrown; + uint32_t exceptions_filters; + uint32_t exceptions_finallys; + uint32_t exceptions_depth; + uint32_t aspnet_requests_queued; + uint32_t aspnet_requests; + uint32_t gc_collections0; + uint32_t gc_collections1; + uint32_t gc_collections2; + uint32_t gc_promotions0; + uint32_t gc_promotions1; + uint32_t gc_promotion_finalizers; + uint32_t gc_gen0size; + uint32_t gc_gen1size; + uint32_t gc_gen2size; + uint32_t gc_lossize; + uint32_t gc_fin_survivors; + uint32_t gc_num_handles; + uint32_t gc_allocated; + uint32_t gc_induced; + uint32_t gc_time; + uint32_t gc_total_bytes; + uint32_t gc_committed_bytes; + uint32_t gc_reserved_bytes; + uint32_t gc_num_pinned; + uint32_t gc_sync_blocks; + uint32_t remoting_calls; + uint32_t remoting_channels; + uint32_t remoting_proxies; + uint32_t remoting_classes; + uint32_t remoting_objects; + uint32_t remoting_contexts; + uint32_t loader_classes; + uint32_t loader_total_classes; + uint32_t loader_appdomains; + uint32_t loader_total_appdomains; + uint32_t loader_assemblies; + uint32_t loader_total_assemblies; + uint32_t loader_failures; + uint32_t loader_bytes; + uint32_t loader_appdomains_uloaded; + uint32_t thread_contentions; + uint32_t thread_queue_len; + uint32_t thread_queue_max; + uint32_t thread_num_logical; + uint32_t thread_num_physical; + uint32_t thread_cur_recognized; + uint32_t 
thread_num_recognized; + uint32_t interop_num_ccw; + uint32_t interop_num_stubs; + uint32_t interop_num_marshals; + uint32_t security_num_checks; + uint32_t security_num_link_checks; + uint32_t security_time; + uint32_t security_depth; + uint32_t unused; + uint64_t threadpool_workitems; + uint64_t threadpool_ioworkitems; + unsigned int threadpool_threads; + unsigned int threadpool_iothreads; +} Il2CppPerfCounters; + +struct MonitorData; +struct Il2CppObject { + struct Il2CppClass *klass; + struct MonitorData *monitor; +}; +typedef int32_t il2cpp_array_lower_bound_t; +struct Il2CppArrayBounds { + il2cpp_array_size_t length; + il2cpp_array_lower_bound_t lower_bound; +}; +struct Il2CppArray { + struct Il2CppObject obj; + struct Il2CppArrayBounds *bounds; + il2cpp_array_size_t max_length; + /* vector must be 8-byte aligned. + On 64-bit platforms, this happens naturally. + On 32-bit platforms, sizeof(obj)=8, sizeof(bounds)=4 and sizeof(max_length)=4 so it's also already aligned. */ + void *vector[32]; +}; +struct Il2CppString { + struct Il2CppObject object; + int32_t length; + uint16_t chars[32]; +}; diff --git a/module/src/main/cpp/log.h b/module/src/main/cpp/log.h new file mode 100644 index 00000000..5f75ef29 --- /dev/null +++ b/module/src/main/cpp/log.h @@ -0,0 +1,16 @@ +// +// Created by Perfare on 2020/7/4. +// + +#ifndef RIRU_IL2CPPDUMPER_LOG_H +#define RIRU_IL2CPPDUMPER_LOG_H + +#include + +#define LOG_TAG "Perfare" +#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__) +#define LOGW(...) __android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__) +#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__) +#define LOGI(...) 
__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__) + +#endif //RIRU_IL2CPPDUMPER_LOG_H diff --git a/module/src/main/cpp/main.cpp b/module/src/main/cpp/main.cpp new file mode 100644 index 00000000..4925cba0 --- /dev/null +++ b/module/src/main/cpp/main.cpp @@ -0,0 +1,79 @@ +#include +#include +#include +#include +#include "hook.h" + +// You can remove functions you don't need + +extern "C" { +#define EXPORT __attribute__((visibility("default"))) __attribute__((used)) +EXPORT void nativeForkAndSpecializePre( + JNIEnv *env, jclass clazz, jint *_uid, jint *gid, jintArray *gids, jint *runtimeFlags, + jobjectArray *rlimits, jint *mountExternal, jstring *seInfo, jstring *niceName, + jintArray *fdsToClose, jintArray *fdsToIgnore, jboolean *is_child_zygote, + jstring *instructionSet, jstring *appDataDir, jboolean *isTopApp, + jobjectArray *pkgDataInfoList, + jobjectArray *whitelistedDataInfoList, jboolean *bindMountAppDataDirs, + jboolean *bindMountAppStorageDirs) { + enable_hack = isGame(env, *appDataDir); +} + +EXPORT int nativeForkAndSpecializePost(JNIEnv *env, jclass clazz, jint res) { + if (res == 0) { + // in app process + if (enable_hack) { + int ret; + pthread_t ntid; + if ((ret = pthread_create(&ntid, NULL, hack_thread, NULL))) { + LOGE("can't create thread: %s\n", strerror(ret)); + } + } + } else { + // in zygote process, res is child pid + // don't print log here, see https://github.com/RikkaApps/Riru/blob/77adfd6a4a6a81bfd20569c910bc4854f2f84f5e/riru-core/jni/main/jni_native_method.cpp#L55-L66 + } + return 0; +} + +EXPORT __attribute__((visibility("default"))) void specializeAppProcessPre( + JNIEnv *env, jclass clazz, jint *_uid, jint *gid, jintArray *gids, jint *runtimeFlags, + jobjectArray *rlimits, jint *mountExternal, jstring *seInfo, jstring *niceName, + jboolean *startChildZygote, jstring *instructionSet, jstring *appDataDir, + jboolean *isTopApp, jobjectArray *pkgDataInfoList, jobjectArray *whitelistedDataInfoList, + jboolean 
*bindMountAppDataDirs, jboolean *bindMountAppStorageDirs) { + // added from Android 10, but disabled at least in Google Pixel devices +} + +EXPORT __attribute__((visibility("default"))) int specializeAppProcessPost( + JNIEnv *env, jclass clazz) { + // added from Android 10, but disabled at least in Google Pixel devices + return 0; +} + +EXPORT void nativeForkSystemServerPre( + JNIEnv *env, jclass clazz, uid_t *uid, gid_t *gid, jintArray *gids, jint *runtimeFlags, + jobjectArray *rlimits, jlong *permittedCapabilities, jlong *effectiveCapabilities) { + +} + +EXPORT int nativeForkSystemServerPost(JNIEnv *env, jclass clazz, jint res) { + if (res == 0) { + // in system server process + } else { + // in zygote process, res is child pid + // don't print log here, see https://github.com/RikkaApps/Riru/blob/77adfd6a4a6a81bfd20569c910bc4854f2f84f5e/riru-core/jni/main/jni_native_method.cpp#L55-L66 + } + return 0; +} + +EXPORT int shouldSkipUid(int uid) { + // by default, Riru only call module functions in "normal app processes" (10000 <= uid % 100000 <= 19999) + // false = don't skip + return false; +} + +EXPORT void onModuleLoaded() { + // called when the shared library of Riru core is loaded +} +} \ No newline at end of file diff --git a/module/src/main/cpp/whale/CMakeLists.txt b/module/src/main/cpp/whale/CMakeLists.txt new file mode 100644 index 00000000..ea7357c5 --- /dev/null +++ b/module/src/main/cpp/whale/CMakeLists.txt @@ -0,0 +1,201 @@ +cmake_minimum_required(VERSION 3.4.1) + +set(CMAKE_CXX_STANDARD 14) + +set(LIBRARY_NAME "whale") + +enable_language(ASM) +SET(CMAKE_ASM_FLAGS "${CFLAGS} -x assembler-with-cpp") + +if (CMAKE_SYSTEM_NAME MATCHES "^Android") + set(KERNEL "Linux") + set(PLATFORM "Android") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s -Wl,--gc-sections") + add_definitions("-DWHALE_ANDROID_AUTO_LOAD") + 
message("Building Whale for Android(${CMAKE_SYSTEM_PROCESSOR})...") +elseif (PLATFORM STREQUAL "IOS") + set(KERNEL "Darwin") + add_definitions("-DDARWIN") + include_directories(/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/usr/include) + if (IOS_ARCH STREQUAL "arm64") + set(CMAKE_SYSTEM_PROCESSOR "aarch64") + elseif (IOS_ARCH MATCHES "arm") + set(CMAKE_SYSTEM_PROCESSOR "arm") + endif () + message("Building Whale for IOS(${CMAKE_SYSTEM_PROCESSOR})...") +else () + if (${CMAKE_SYSTEM_NAME} STREQUAL "Darwin") + set(KERNEL "Darwin") + add_definitions("-DDARWIN") + endif () + message("Building Whale for ${CMAKE_SYSTEM_NAME}(${CMAKE_SYSTEM_PROCESSOR})...") +endif () + + +include_directories(src/libffi src/libffi/platform_include) + +set(WHALE_SOURCES + src/whale.cc + src/interceptor.cc + src/dbi/hook_common.cc + src/platform/memory.cc + src/assembler/assembler.cc + src/assembler/memory_region.cc + src/dbi/instruction_set.cc + src/libffi/closures.c + src/libffi/debug.c + src/libffi/dlmalloc.c + src/libffi/java_raw_api.c + src/libffi/prep_cif.c + src/libffi/raw_api.c + src/libffi/types.c + src/libffi/ffi_cxx.cc + ) + +set(WHALE_LINUX_SOURCES + src/platform/linux/elf_image.cc + src/platform/linux/process_map.cc + ) + + +set(WHALE_DARWIN_SOURCES + src/dbi/darwin/macho_import_hook.cc + ) + +set(WHALE_ANDROID_ART + src/android/art/native_on_load.cc + src/android/art/art_runtime.cc + src/android/art/art_symbol_resolver.cc + src/android/art/java_types.cc + src/android/art/well_known_classes.cc + src/android/art/art_method.cc + src/android/art/scoped_thread_state_change.cc + src/android/art/art_jni_trampoline.cc + ) + +set(WHALE_AARCH32 + src/dbi/arm/decoder_arm.cc + src/dbi/arm/decoder_thumb.cc + src/dbi/arm/inline_hook_arm.cc + src/dbi/arm/instruction_rewriter_arm.cc + src/libffi/arm/ffi_armv7.c + src/libffi/arm/sysv_armv7.S + ) + +set(WHALE_AARCH64 + src/dbi/arm64/decoder_arm64.cc + src/dbi/arm64/instruction_rewriter_arm64.cc 
+ src/dbi/arm64/inline_hook_arm64.cc + src/libffi/aarch64/ffi_arm64.c + src/libffi/aarch64/sysv_arm64.S + ) + +set(WHALE_DISASSEMBLER_X86_OR_X86_64 + src/dbi/x86/distorm/decoder.c + src/dbi/x86/distorm/distorm.c + src/dbi/x86/distorm/instructions.c + src/dbi/x86/distorm/insts.c + src/dbi/x86/distorm/mnemonics.c + src/dbi/x86/distorm/operands.c + src/dbi/x86/distorm/prefix.c + src/dbi/x86/distorm/textdefs.c + src/dbi/x86/distorm/wstring.c + ) + +set(WHALE_X86 + src/assembler/x86/assembler_x86.cc + src/assembler/x86/managed_register_x86.cc + src/dbi/x86/inline_hook_x86.cc + src/dbi/x86/intercept_syscall_x86.cc + src/dbi/x86/instruction_rewriter_x86.cc + src/libffi/x86/ffi_i386.c + src/libffi/x86/sysv_i386.S + ) + +set(WHALE_X86_64 + src/assembler/x86_64/assembler_x86_64.cc + src/assembler/x86_64/managed_register_x86_64.cc + src/dbi/x86_64/inline_hook_x86_64.cc + src/dbi/x86_64/instruction_rewriter_x86_64.cc + src/libffi/x86/ffiw64_x86_64.c + src/libffi/x86/ffi64_x86_64.c + src/libffi/x86/unix64_x86_64.S + src/libffi/x86/win64_x86_64.S + ) + + +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") + + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -arch armv7") + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_AARCH32}) + +elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64") + + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -arch arm64") + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_AARCH64}) + +elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(i.86|x86?)$") + + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -arch x86") + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_X86} ${WHALE_DISASSEMBLER_X86_OR_X86_64}) + +elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^x86_64") + + set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -arch x86_64") + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_X86_64} ${WHALE_DISASSEMBLER_X86_OR_X86_64}) + +endif () + + +if (PLATFORM STREQUAL "Android") + + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_ANDROID_ART}) + + if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") + + set(WHALE_SOURCES ${WHALE_SOURCES} 
${WHALE_ANDROID_ART_AARCH32}) + + elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64") + + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_ANDROID_ART_AARCH64}) + + elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(i.86|x86?)$") + + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_ANDROID_ART_X86}) + + elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^x86_64") + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_ANDROID_ART_X86_64}) + + endif () + +endif () + +if (KERNEL STREQUAL "Linux") + + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_LINUX_SOURCES}) + +elseif (KERNEL STREQUAL "Darwin") + set(WHALE_SOURCES ${WHALE_SOURCES} ${WHALE_DARWIN_SOURCES}) +endif () + +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm|aarch64)") + include_directories(src/assembler/vixl) +endif () + +add_subdirectory(src/assembler/vixl) +include_directories(src) + +add_definitions("-DWHALE_LIBRARY_NAME=\"lib${LIBRARY_NAME}.so\"") +add_library(${LIBRARY_NAME} STATIC ${WHALE_SOURCES}) + +target_include_directories(${LIBRARY_NAME} PUBLIC include) + +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm|aarch64)") + target_link_libraries(${LIBRARY_NAME} vixl) +endif () + +if (PLATFORM STREQUAL "Android") + target_link_libraries(${LIBRARY_NAME} log) +endif () diff --git a/module/src/main/cpp/whale/include/whale.h b/module/src/main/cpp/whale/include/whale.h new file mode 100644 index 00000000..c31188af --- /dev/null +++ b/module/src/main/cpp/whale/include/whale.h @@ -0,0 +1,23 @@ +#ifndef WHALE_PUBLIC_H_ +#define WHALE_PUBLIC_H_ + +#define NULLABLE + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +void WInlineHookFunction(void *address, void *replace, void **backup); + +void WImportHookFunction(const char *name, NULLABLE const char *libname, void *replace, void **backup); + +void *WDynamicLibOpen(const char *name); + +void *WDynamicLibOpenAlias(const char *name, const char *path); + +void *WDynamicLibSymbol(void *handle, const char *name); + +#ifdef __cplusplus +} +#endif // __cplusplus +#endif // WHALE_PUBLIC_H_ diff --git 
a/module/src/main/cpp/whale/src/android/android_build.h b/module/src/main/cpp/whale/src/android/android_build.h new file mode 100644 index 00000000..5d20d6ae --- /dev/null +++ b/module/src/main/cpp/whale/src/android/android_build.h @@ -0,0 +1,30 @@ +#ifndef WHALE_ANDROID_ANDROID_BUILD_H_ +#define WHALE_ANDROID_ANDROID_BUILD_H_ + +#include +#include +#include + +#define ANDROID_ICE_CREAM_SANDWICH 14 +#define ANDROID_ICE_CREAM_SANDWICH_MR1 15 +#define ANDROID_JELLY_BEAN 16 +#define ANDROID_JELLY_BEAN_MR1 17 +#define ANDROID_JELLY_BEAN_MR2 18 +#define ANDROID_KITKAT 19 +#define ANDROID_KITKAT_WATCH 20 +#define ANDROID_LOLLIPOP 21 +#define ANDROID_LOLLIPOP_MR1 22 +#define ANDROID_M 23 +#define ANDROID_N 24 +#define ANDROID_N_MR1 25 +#define ANDROID_O 26 +#define ANDROID_O_MR1 27 +#define ANDROID_P 28 + +static inline int32_t GetAndroidApiLevel() { + char prop_value[PROP_VALUE_MAX]; + __system_property_get("ro.build.version.sdk", prop_value); + return atoi(prop_value); +} + +#endif // WHALE_ANDROID_ANDROID_BUILD_H_ diff --git a/module/src/main/cpp/whale/src/android/art/art_hook_param.h b/module/src/main/cpp/whale/src/android/art/art_hook_param.h new file mode 100644 index 00000000..ecaef27e --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/art_hook_param.h @@ -0,0 +1,31 @@ +#ifndef WHALE_ANDROID_ART_INTERCEPT_PARAM_H_ +#define WHALE_ANDROID_ART_INTERCEPT_PARAM_H_ + +#include +#include "base/primitive_types.h" +#include "ffi_cxx.h" + +namespace whale { +namespace art { + +struct ArtHookParam final { + bool is_static_; + const char *shorty_; + jobject addition_info_; + ptr_t origin_compiled_code_; + ptr_t origin_jni_code_; + u4 origin_access_flags; + u4 origin_code_item_off; + jobject origin_method_; + jobject hooked_method_; + volatile ptr_t decl_class_; + jobject class_Loader_; + jmethodID hooked_native_method_; + jmethodID origin_native_method_; + FFIClosure *jni_closure_; +}; + +} // namespace art +} // namespace whale + +#endif // 
// art_jni_trampoline.cc
// Builds one libffi closure per hooked Java method.  ART is made to treat the
// hooked method as a JNI stub; when it is called, the closure fires
// FFIJniDispatcher, which boxes the incoming native arguments into an
// Object[] and forwards them to the Java-side bridge
// (ArtRuntime::InvokeHookedMethodBridge), then unboxes the bridge's result.
//
// NOTE(review): recovered from a mangled diff -- the angle-bracket #include
// names and every template argument list were stripped by extraction and have
// been reconstructed below; TODO confirm against the upstream whale sources.
#include <jni.h>      // reconstructed -- TODO confirm
#include <cstring>    // reconstructed (strlen) -- TODO confirm
#include "android/art/art_jni_trampoline.h"
#include "android/art/java_types.h"
#include "android/art/well_known_classes.h"
#include "android/art/art_runtime.h"
#include "platform/memory.h"
#include "base/macros.h"
#include "ffi_cxx.h"

namespace whale {
namespace art {

// Unbox the boxed java.lang.* wrapper `obj` into the jvalue slot selected by
// the shorty character `type`.  nullptr and reference types (shorty 'L' and
// anything unrecognized) are stored unconverted in jv->l.
static void UnBoxValue(JNIEnv *env, jvalue *jv, jobject obj, char type) {
    if (obj == nullptr) {
        jv->l = obj;
        return;
    }
    switch (type) {
        case 'I':
            jv->i = Types::FromInteger(env, obj);
            break;
        case 'Z':
            jv->z = Types::FromBoolean(env, obj);
            break;
        case 'J':
            jv->j = Types::FromLong(env, obj);
            break;
        case 'F':
            jv->f = Types::FromFloat(env, obj);
            break;
        case 'B':
            jv->b = Types::FromByte(env, obj);
            break;
        case 'D':
            jv->d = Types::FromDouble(env, obj);
            break;
        case 'S':
            jv->s = Types::FromShort(env, obj);
            break;
        case 'C':
            jv->c = Types::FromCharacter(env, obj);
            break;
        default:
            jv->l = obj;
    }
}

// Call the Java-side hook bridge and unbox its boxed jobject result into the
// primitive (or reference) RetType demanded by the hooked method's shorty.
template <typename RetType>  // template parameter reconstructed -- TODO confirm
static RetType
InvokeJavaBridge(JNIEnv *env, ArtHookParam *param, jobject this_object,
                 jobjectArray arguments) {
    jobject ret = ArtRuntime::Get()->InvokeHookedMethodBridge(
            env,
            param,
            this_object,
            arguments
    );
    jvalue val;
    // shorty_[0] is the return-type character.
    UnBoxValue(env, &val, ret, param->shorty_[0]);
    return ForceCast<RetType>(val);
}

// Void-returning variant: the bridge's result (if any) is simply discarded.
static void InvokeVoidJavaBridge(JNIEnv *env, ArtHookParam *param, jobject this_object,
                                 jobjectArray arguments) {
    ArtRuntime::Get()->InvokeHookedMethodBridge(
            env,
            param,
            this_object,
            arguments
    );
}

// Accumulates boxed call arguments into a jobjectArray (Object[]) that is
// handed to the Java-side bridge.  One Append call per shorty argument.
class QuickArgumentBuilder {
 public:
    QuickArgumentBuilder(JNIEnv *env, size_t len) : env_(env), index_(0) {
        array_ = env->NewObjectArray(
                static_cast<jsize>(len),  // cast target reconstructed -- TODO confirm
                WellKnownClasses::java_lang_Object,
                nullptr
        );
    }

// Expands to AppendInteger(jint), AppendBoolean(jboolean), ... -- each boxes
// the primitive via Types::To<Name> and stores it at the next array slot.
#define APPEND_DEF(name, type) \
    void Append##name(type value) { \
        env_->SetObjectArrayElement(array_, index_++, \
                                    Types::To##name(env_, value)); \
    } \


    APPEND_DEF(Integer, jint)

    APPEND_DEF(Boolean, jboolean)

    APPEND_DEF(Byte, jbyte)

    APPEND_DEF(Character, jchar)

    APPEND_DEF(Short, jshort)

    APPEND_DEF(Float, jfloat)

    APPEND_DEF(Double, jdouble)

    APPEND_DEF(Long, jlong)

    APPEND_DEF(Object, jobject)


#undef APPEND_DEF

    jobjectArray GetArray() {
        return array_;
    }

 private:
    JNIEnv *env_;
    jobjectArray array_;
    int index_;
};

// Map a JNI shorty character to the corresponding libffi type descriptor.
// Aborts (LOG(FATAL) + UNREACHABLE) on characters that cannot appear in a
// valid shorty.
FFIType FFIGetJniParameter(char shorty) {
    switch (shorty) {
        case 'Z':
            return FFIType::kFFITypeU1;
        case 'B':
            return FFIType::kFFITypeS1;
        case 'C':
            return FFIType::kFFITypeU2;
        case 'S':
            return FFIType::kFFITypeS2;
        case 'I':
            return FFIType::kFFITypeS4;
        case 'J':
            return FFIType::kFFITypeS8;
        case 'F':
            return FFIType::kFFITypeFloat;
        case 'D':
            return FFIType::kFFITypeDouble;
        case 'L':
            return FFIType::kFFITypePointer;
        case 'V':
            return FFIType::kFFITypeVoid;
        default:
            LOG(FATAL) << "unhandled shorty type: " << shorty;
            UNREACHABLE();
    }
}

// libffi closure entry point for a hooked method.
//   resp     -- slot for the native return value,
//   args     -- the raw JNI stub arguments: args[0] = JNIEnv*, args[1] =
//               receiver (instance method) or declaring jclass (static),
//               args[2..] = the Java-level arguments,
//   userdata -- the ArtHookParam installed by BuildJniClosure.
// Boxes the arguments, invokes the Java bridge, and writes the (unboxed)
// result into *resp according to the return-shorty.
void FFIJniDispatcher(FFIClosure *closure, void *resp, void **args, void *userdata) {
#define FFI_ARG(name, type) \
    builder.Append##name(*reinterpret_cast<type *>(args[i]));

    ArtHookParam *param = reinterpret_cast<ArtHookParam *>(userdata);
    const char *argument = param->shorty_ + 1;  // skip the return-type char
    unsigned int argument_len = (unsigned int) strlen(argument);
    JNIEnv *env = *reinterpret_cast<JNIEnv **>(args[0]);
    jobject this_object = nullptr;
    if (!param->is_static_) {
        this_object = *reinterpret_cast<jobject *>(args[1]);
    }
    // skip first two arguments (JNIEnv* and the receiver/jclass slot)
    args += 2;
    QuickArgumentBuilder builder(env, argument_len);

    for (int i = 0; i < argument_len; ++i) {
        switch (argument[i]) {
            case 'Z':
                FFI_ARG(Boolean, jboolean);
                break;
            case 'B':
                FFI_ARG(Byte, jbyte);
                break;
            case 'C':
                FFI_ARG(Character, jchar);
                break;
            case 'S':
                FFI_ARG(Short, jshort);
                break;
            case 'I':
                FFI_ARG(Integer, jint);
                break;
            case 'J':
                FFI_ARG(Long, jlong);
                break;
            case 'F':
                FFI_ARG(Float, jfloat);
                break;
            case 'D':
                FFI_ARG(Double, jdouble);
                break;
            case 'L':
                FFI_ARG(Object, jobject);
                break;
            default:
                LOG(FATAL) << "unhandled shorty type: " << argument[i];
                UNREACHABLE();
        }
    }
// Dispatch on the return-type shorty: invoke the bridge and store the
// unboxed result through resp with the correct native width.
#define INVOKE(type) \
    *reinterpret_cast<type *>(resp) = InvokeJavaBridge<type>(env, param, this_object, \
                                                             builder.GetArray());

    switch (param->shorty_[0]) {
        case 'Z':
            INVOKE(jboolean);
            break;
        case 'B':
            INVOKE(jbyte);
            break;
        case 'C':
            INVOKE(jchar);
            break;
        case 'S':
            INVOKE(jshort);
            break;
        case 'I':
            INVOKE(jint);
            break;
        case 'J':
            INVOKE(jlong);
            break;
        case 'F':
            INVOKE(jfloat);
            break;
        case 'D':
            INVOKE(jdouble);
            break;
        case 'L':
            INVOKE(jobject);
            break;
        case 'V':
            InvokeVoidJavaBridge(env, param, this_object, builder.GetArray());
            break;
        default:
            LOG(FATAL) << "unhandled shorty type: " << param->shorty_[0];
            UNREACHABLE();
    }
#undef INVOKE
#undef FFI_ARG
}


// Builds param->jni_closure_: an FFI call interface matching the hooked
// method's JNI signature (JNIEnv*, jclass/jobject, then the Java arguments),
// with FFIJniDispatcher as the target and `param` as userdata.
// NOTE(review): `cif` is heap-allocated and never freed -- presumably
// intentional, since the closure must live for the process lifetime; confirm.
void BuildJniClosure(ArtHookParam *param) {
    const char *argument = param->shorty_ + 1;
    unsigned int java_argument_len = (unsigned int) strlen(argument);
    unsigned int jni_argument_len = java_argument_len + 2;
    FFICallInterface *cif = new FFICallInterface(
            FFIGetJniParameter(param->shorty_[0])
    );
    cif->Parameter(FFIType::kFFITypePointer);  // JNIEnv *
    cif->Parameter(FFIType::kFFITypePointer);  // jclass or jobject
    for (int i = 2; i < jni_argument_len; ++i) {
        cif->Parameter(FFIGetJniParameter(argument[i - 2]));
    }
    cif->FinalizeCif();
    param->jni_closure_ = cif->CreateClosure(param, FFIJniDispatcher);
}

}  // namespace art
}  // namespace whale
// art_jni_trampoline.h -- declaration for the libffi JNI closure builder.
#ifndef WHALE_ANDROID_ART_JNI_TRAMPOLINE_H_
#define WHALE_ANDROID_ART_JNI_TRAMPOLINE_H_

#include "android/art/art_hook_param.h"

namespace whale {
namespace art {


// Builds param->jni_closure_, the FFI closure that re-routes calls to the
// hooked method into the Java-side hook bridge (see art_jni_trampoline.cc).
void BuildJniClosure(ArtHookParam *param);


}  // namespace art
}  // namespace whale

#endif  // WHALE_ANDROID_ART_JNI_TRAMPOLINE_H_

// ---------------------------------------------------------------------------
// art_method.cc -- ArtMethod::Clone: creates the "backup" copy of a method
// that preserves the original entry points, so the original implementation
// remains callable after the live ArtMethod has been redirected.
// NOTE(review): recovered from a mangled diff; stripped template argument
// lists reconstructed -- TODO confirm against the upstream whale sources.
#include "android/art/art_method.h"
#include "well_known_classes.h"

namespace whale {
namespace art {

jobject ArtMethod::Clone(JNIEnv *env, u4 access_flags) {
    int32_t api_level = GetAndroidApiLevel();
    jmethodID jni_clone_method = nullptr;
    if (api_level < ANDROID_M) {
        // Pre-M an ArtMethod is a mirror object: clone it through the runtime.
        jni_clone_method =
                reinterpret_cast<jmethodID>(ArtRuntime::Get()->CloneArtObject(jni_method_));
    } else {
        // M+: ArtMethod is plain native memory; take a raw copy.
        // NOTE(review): malloc result is used unchecked.
        jni_clone_method = reinterpret_cast<jmethodID>(malloc(offset_->method_size_));
        if (symbols_->ArtMethod_CopyFrom) {
            symbols_->ArtMethod_CopyFrom(jni_clone_method, jni_method_, sizeof(ptr_t));
        } else {
            memcpy(jni_clone_method, jni_method_, offset_->method_size_);
        }
    }

    ArtMethod clone_method = ArtMethod(jni_clone_method);
    bool is_direct_method = (access_flags & kAccDirectFlags) != 0;
    bool is_native_method = (access_flags & kAccNative) != 0;
    if (!is_direct_method) {
        // Force the backup to be private so it is dispatched directly instead
        // of virtually (virtual dispatch would hit the hooked slot again).
        access_flags &= ~(kAccPublic | kAccProtected);
        access_flags |= kAccPrivate;
    }
    access_flags &= ~kAccSynchronized;
    // Keep the JIT/AOT compiler away from the backup method; the flag bit
    // moved between N and O_MR1.
    if (api_level < ANDROID_O_MR1) {
        access_flags |= kAccCompileDontBother_N;
    } else {
        access_flags |= kAccCompileDontBother_O_MR1;
        access_flags |= kAccPreviouslyWarm_O_MR1;
    }
    if (!is_native_method) {
        access_flags |= kAccSkipAccessChecks;
    }
    if (api_level >= ANDROID_N) {
        clone_method.SetHotnessCount(0);
        if (!is_native_method) {
            // On N+ the "JNI" slot of a non-native method holds a
            // ProfilingInfo*; patch any back-references to the original
            // ArtMethod so the profiler does not resurrect it.
            // NOTE(review): only the first four u4-sized slots are scanned --
            // layout assumption per ART version, confirm.
            ptr_t profiling_info = GetEntryPointFromJni();
            if (profiling_info != nullptr) {
                offset_t end = sizeof(u4) * 4;
                for (offset_t offset = 0; offset != end; offset += sizeof(u4)) {
                    if (MemberOf<jmethodID>(profiling_info, offset) == jni_method_) {
                        AssignOffset(profiling_info, offset, jni_clone_method);
                    }
                }
            }
        }
    }
    // Non-native backups run through the interpreter bridge.
    if (!is_native_method && symbols_->art_quick_to_interpreter_bridge) {
        clone_method.SetEntryPointFromQuickCompiledCode(
                symbols_->art_quick_to_interpreter_bridge);
    }

    clone_method.SetAccessFlags(access_flags);

    bool is_constructor = (access_flags & kAccConstructor) != 0;
    bool is_static = (access_flags & kAccStatic) != 0;
    if (is_constructor) {
        // ToReflectedMethod would produce a Constructor object; strip the
        // flag temporarily so we always get a java.lang.reflect.Method.
        clone_method.RemoveAccessFlags(kAccConstructor);
    }
    // NOTE(review): the declaring-class argument passed here is
    // java.lang.Object, not the real declaring class -- ART appears not to
    // validate it on this path; confirm.
    jobject java_method = env->ToReflectedMethod(WellKnownClasses::java_lang_Object,
                                                 jni_clone_method,
                                                 static_cast<jboolean>(is_static));
    env->CallVoidMethod(java_method,
                        WellKnownClasses::java_lang_reflect_AccessibleObject_setAccessible,
                        true);
    if (is_constructor) {
        clone_method.AddAccessFlags(kAccConstructor);
    }
    return java_method;
}


}  // namespace art
}  // namespace whale

// ---------------------------------------------------------------------------
// art_method.h -- offset-based accessor wrapper over a raw ART ArtMethod.
#ifndef WHALE_ANDROID_ART_ART_METHOD_H_
#define WHALE_ANDROID_ART_ART_METHOD_H_

#include <jni.h>      // reconstructed -- TODO confirm
#include <cstring>    // reconstructed -- TODO confirm
#include "android/art/art_runtime.h"
#include "android/art/modifiers.h"
#include "base/cxx_helper.h"
#include "base/primitive_types.h"

namespace whale {
namespace art {

// Reads and writes ArtMethod fields through offsets probed at runtime by
// ArtRuntime::OnLoad, so no ART headers are needed and the wrapper works
// across Android versions.  A jmethodID is (on ART) a pointer to the
// underlying ArtMethod.
class ArtMethod final {
 public:
    explicit ArtMethod(jmethodID method) : jni_method_(method) {
        ArtRuntime *runtime = ArtRuntime::Get();
        offset_ = runtime->GetArtMethodOffsets();
        symbols_ = runtime->GetSymbols();
    }

    u4 GetAccessFlags() {
        return MemberOf<u4>(jni_method_, offset_->access_flags_offset_);
    }

    void SetAccessFlags(u4 access_flags) {
        AssignOffset(jni_method_, offset_->access_flags_offset_, access_flags);
    }

    void AddAccessFlags(u4 access_flags) {
        SetAccessFlags(GetAccessFlags() | access_flags);
    }

    void RemoveAccessFlags(u4 access_flags) {
        SetAccessFlags(GetAccessFlags() & ~access_flags);
    }

    // True if ANY of the given flag bits is set (not all of them).
    bool HasAccessFlags(u4 access_flags) {
        return (access_flags & GetAccessFlags()) != 0;
    }

    u2 GetMethodIndex() {
        return MemberOf<u2>(jni_method_, offset_->method_index_offset_);
    }

    u4 GetDexMethodIndex() {
        return MemberOf<u4>(jni_method_, offset_->dex_method_index_offset_);
    }

    u4 GetDexCodeItemOffset() {
        return MemberOf<u4>(jni_method_, offset_->dex_code_item_offset_offset_);
    }

    u2 GetHotnessCount() {
        return MemberOf<u2>(jni_method_, offset_->hotness_count_offset_);
    }

    void SetMethodIndex(u2 index) {
        AssignOffset(jni_method_, offset_->method_index_offset_, index);
    }

    void SetDexMethodIndex(u4 index) {
        AssignOffset(jni_method_, offset_->dex_method_index_offset_, index);
    }

    void SetDexCodeItemOffset(u4 offset) {
        AssignOffset(jni_method_, offset_->dex_code_item_offset_offset_, offset);
    }

    void SetHotnessCount(u2 count) {
        AssignOffset(jni_method_, offset_->hotness_count_offset_, count);
    }

    ptr_t GetEntryPointFromQuickCompiledCode() {
        return MemberOf<ptr_t>(jni_method_, offset_->quick_code_offset_);
    }

    void SetEntryPointFromQuickCompiledCode(ptr_t entry_point) {
        AssignOffset(jni_method_, offset_->quick_code_offset_, entry_point);
    }

    ptr_t GetEntryPointFromInterpreterCode() {
        return MemberOf<ptr_t>(jni_method_, offset_->interpreter_code_offset_);
    }

    void SetEntryPointFromInterpreterCode(ptr_t entry_point) {
        AssignOffset(jni_method_, offset_->interpreter_code_offset_, entry_point);
    }

    // On N+ this slot holds a ProfilingInfo* for non-native methods (see
    // ArtMethod::Clone).
    ptr_t GetEntryPointFromJni() {
        return MemberOf<ptr_t>(jni_method_, offset_->jni_code_offset_);
    }

    void SetEntryPointFromJni(ptr_t entry_point) {
        AssignOffset(jni_method_, offset_->jni_code_offset_, entry_point);
    }

    /*
     * Notice: This is a GcRoot reference.
     */
    ptr_t GetDeclaringClass() {
        return MemberOf<ptr_t>(jni_method_, 0);
    }

    void SetDeclaringClass(ptr_t declaring_class) {
        AssignOffset(jni_method_, 0, declaring_class);
    }

    // Returns the method's shorty signature, preferring the in-process libart
    // symbol and falling back to the Java helper WhaleRuntime.getShorty.
    // NOTE(review): on the fallback path the GetStringUTFChars buffer (and the
    // local ref) is never released -- presumably intentional, as the string is
    // retained for the lifetime of the hook; confirm.
    const char *GetShorty(JNIEnv *env, jobject java_method) {
        if (symbols_->Art_GetMethodShorty != nullptr) {
            return symbols_->Art_GetMethodShorty(env, jni_method_);
        } else {
            // jmethodID of the helper is cached after the first lookup.
            static jmethodID WhaleRuntime_getShorty = nullptr;
            jclass java_class = ArtRuntime::Get()->java_class_;
            if (WhaleRuntime_getShorty == nullptr) {
                WhaleRuntime_getShorty = env->GetStaticMethodID(
                        java_class,
                        "getShorty",
                        "(Ljava/lang/reflect/Member;)Ljava/lang/String;"
                );
            }
            jstring jshorty = static_cast<jstring>(env->CallStaticObjectMethod(
                    java_class,
                    WhaleRuntime_getShorty,
                    java_method
            ));
            const char *shorty = env->GetStringUTFChars(jshorty, nullptr);
            return shorty;
        }
    }

    // Creates the backup copy of this method (see art_method.cc).
    jobject Clone(JNIEnv *env, u4 access_flags);

 private:
    jmethodID jni_method_;
    // Convenient for quick invocation
    ArtMethodOffsets *offset_;
    ResolvedSymbols *symbols_;
};


}  // namespace art
}  // namespace whale

#endif  // WHALE_ANDROID_ART_ART_METHOD_H_
+#include "base/logging.h" +#include "base/singleton.h" +#include "base/cxx_helper.h" + +namespace whale { +namespace art { + +ArtRuntime *ArtRuntime::Get() { + static ArtRuntime instance; + return &instance; +} + +void PreLoadRequiredStuff(JNIEnv *env) { + Types::Load(env); + WellKnownClasses::Load(env); + ScopedNoGCDaemons::Load(env); +} + + +bool ArtRuntime::OnLoad(JavaVM *vm, JNIEnv *env, jclass java_class) { +#define CHECK_FIELD(field, value) \ + if ((field) == (value)) { \ + LOG(ERROR) << "Failed to find " #field "."; \ + return false; \ + } + if ((kRuntimeISA == InstructionSet::kArm + || kRuntimeISA == InstructionSet::kArm64) + && IsFileInMemory("libhoudini.so")) { + LOG(INFO) << '[' << getpid() << ']' << " Unable to launch on houdini environment."; + return false; + } + vm_ = vm; + java_class_ = reinterpret_cast(env->NewGlobalRef(java_class)); + bridge_method_ = env->GetStaticMethodID( + java_class, + "handleHookedMethod", + "(Ljava/lang/reflect/Member;JLjava/lang/Object;Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;" + ); + if (JNIExceptionCheck(env)) { + return false; + } + api_level_ = GetAndroidApiLevel(); + PreLoadRequiredStuff(env); + const char *art_path = kLibArtPath; + art_elf_image_ = WDynamicLibOpen(art_path); + if (art_elf_image_ == nullptr) { + LOG(ERROR) << "Unable to read data from libart.so."; + return false; + } + if (!art_symbol_resolver_.Resolve(art_elf_image_, api_level_)) { + // The log will all output from ArtSymbolResolver. + return false; + } + offset_t jni_code_offset = INT32_MAX; + offset_t access_flags_offset = INT32_MAX; + + size_t entrypoint_filed_size = (api_level_ <= ANDROID_LOLLIPOP) ? 
8 + : kPointerSize; + u4 expected_access_flags = kAccPrivate | kAccStatic | kAccNative; + jmethodID reserved0 = env->GetStaticMethodID(java_class, kMethodReserved0, "()V"); + jmethodID reserved1 = env->GetStaticMethodID(java_class, kMethodReserved1, "()V"); + + for (offset_t offset = 0; offset != sizeof(u4) * 24; offset += sizeof(u4)) { + if (MemberOf(reserved0, offset) == expected_access_flags) { + access_flags_offset = offset; + break; + } + } + void *native_function = reinterpret_cast(WhaleRuntime_reserved0); + + for (offset_t offset = 0; offset != sizeof(u4) * 24; offset += sizeof(u4)) { + if (MemberOf(reserved0, offset) == native_function) { + jni_code_offset = offset; + break; + } + } + CHECK_FIELD(access_flags_offset, INT32_MAX) + CHECK_FIELD(jni_code_offset, INT32_MAX) + + method_offset_.method_size_ = DistanceOf(reserved0, reserved1); + method_offset_.jni_code_offset_ = jni_code_offset; + method_offset_.quick_code_offset_ = jni_code_offset + entrypoint_filed_size; + method_offset_.access_flags_offset_ = access_flags_offset; + method_offset_.dex_code_item_offset_offset_ = access_flags_offset + sizeof(u4); + method_offset_.dex_method_index_offset_ = access_flags_offset + sizeof(u4) * 2; + method_offset_.method_index_offset_ = access_flags_offset + sizeof(u4) * 3; + if (api_level_ < ANDROID_N + && GetSymbols()->artInterpreterToCompiledCodeBridge != nullptr) { + method_offset_.interpreter_code_offset_ = jni_code_offset - entrypoint_filed_size; + } + if (api_level_ >= ANDROID_N) { + method_offset_.hotness_count_offset_ = method_offset_.method_index_offset_ + sizeof(u2); + } + ptr_t quick_generic_jni_trampoline = WDynamicLibSymbol( + art_elf_image_, + "art_quick_generic_jni_trampoline" + ); + env->CallStaticVoidMethod(java_class, reserved0); + + /** + * Fallback to do a relative memory search for quick_generic_jni_trampoline, + * This case is almost impossible to enter + * because its symbols are found almost always on all devices. 
+ * This algorithm has been verified on 5.0 ~ 9.0. + * And we're pretty sure that its structure has not changed in the OEM Rom. + */ + if (quick_generic_jni_trampoline == nullptr) { + ptr_t heap = nullptr; + ptr_t thread_list = nullptr; + ptr_t class_linker = nullptr; + ptr_t intern_table = nullptr; + + ptr_t runtime = MemberOf(vm, kPointerSize); + CHECK_FIELD(runtime, nullptr) + runtime_objects_.runtime_ = runtime; + + offset_t start = (kPointerSize == 4) ? 200 : 384; + offset_t end = start + (100 * kPointerSize); + for (offset_t offset = start; offset != end; offset += kPointerSize) { + if (MemberOf(runtime, offset) == vm) { + size_t class_linker_offset = offset - (kPointerSize * 3) - (2 * kPointerSize); + if (api_level_ >= ANDROID_O_MR1) { + class_linker_offset -= kPointerSize; + } + offset_t intern_table_offset = class_linker_offset - kPointerSize; + offset_t thread_list_Offset = intern_table_offset - kPointerSize; + offset_t heap_offset = thread_list_Offset - (4 * kPointerSize); + if (api_level_ >= ANDROID_M) { + heap_offset -= 3 * kPointerSize; + } + if (api_level_ >= ANDROID_N) { + heap_offset -= kPointerSize; + } + heap = MemberOf(runtime, heap_offset); + thread_list = MemberOf(runtime, thread_list_Offset); + class_linker = MemberOf(runtime, class_linker_offset); + intern_table = MemberOf(runtime, intern_table_offset); + break; + } + } + CHECK_FIELD(heap, nullptr) + CHECK_FIELD(thread_list, nullptr) + CHECK_FIELD(class_linker, nullptr) + CHECK_FIELD(intern_table, nullptr) + + runtime_objects_.heap_ = heap; + runtime_objects_.thread_list_ = thread_list; + runtime_objects_.class_linker_ = class_linker; + runtime_objects_.intern_table_ = intern_table; + + start = kPointerSize * 25; + end = start + (100 * kPointerSize); + for (offset_t offset = start; offset != end; offset += kPointerSize) { + if (MemberOf(class_linker, offset) == intern_table) { + offset_t target_offset = + offset + ((api_level_ >= ANDROID_M) ? 
3 : 5) * kPointerSize; + quick_generic_jni_trampoline = MemberOf(class_linker, target_offset); + break; + } + } + } + CHECK_FIELD(quick_generic_jni_trampoline, nullptr) + class_linker_objects_.quick_generic_jni_trampoline_ = quick_generic_jni_trampoline; + + pthread_mutex_init(&mutex, nullptr); + EnforceDisableHiddenAPIPolicy(); + if (api_level_ >= ANDROID_N) { + FixBugN(); + } + return true; + +#undef CHECK_OFFSET +} + + +jlong +ArtRuntime::HookMethod(JNIEnv *env, jclass decl_class, jobject hooked_java_method, + jobject addition_info) { + ScopedSuspendAll suspend_all; + + jmethodID hooked_jni_method = env->FromReflectedMethod(hooked_java_method); + ArtMethod hooked_method(hooked_jni_method); + auto *param = new ArtHookParam(); + + param->class_Loader_ = env->NewGlobalRef( + env->CallObjectMethod( + decl_class, + WellKnownClasses::java_lang_Class_getClassLoader + ) + ); + param->shorty_ = hooked_method.GetShorty(env, hooked_java_method); + param->is_static_ = hooked_method.HasAccessFlags(kAccStatic); + + param->origin_compiled_code_ = hooked_method.GetEntryPointFromQuickCompiledCode(); + param->origin_code_item_off = hooked_method.GetDexCodeItemOffset(); + param->origin_jni_code_ = hooked_method.GetEntryPointFromJni(); + param->origin_access_flags = hooked_method.GetAccessFlags(); + jobject origin_java_method = hooked_method.Clone(env, param->origin_access_flags); + + ResolvedSymbols *symbols = GetSymbols(); + if (symbols->ProfileSaver_ForceProcessProfiles) { + symbols->ProfileSaver_ForceProcessProfiles(); + } + // After android P, hotness_count_ maybe an imt_index_ for abstract method + if ((api_level_ > ANDROID_P && !hooked_method.HasAccessFlags(kAccAbstract)) + || api_level_ >= ANDROID_N) { + hooked_method.SetHotnessCount(0); + } + // Clear the dex_code_item_offset_. + // It needs to be 0 since hooked methods have no CodeItems but the + // method they copy might. 
+ hooked_method.SetDexCodeItemOffset(0); + u4 access_flags = hooked_method.GetAccessFlags(); + if (api_level_ < ANDROID_O_MR1) { + access_flags |= kAccCompileDontBother_N; + } else { + access_flags |= kAccCompileDontBother_O_MR1; + access_flags |= kAccPreviouslyWarm_O_MR1; + } + access_flags |= kAccNative; + access_flags |= kAccFastNative; + if (api_level_ >= ANDROID_P) { + access_flags &= ~kAccCriticalNative_P; + } + hooked_method.SetAccessFlags(access_flags); + hooked_method.SetEntryPointFromQuickCompiledCode( + class_linker_objects_.quick_generic_jni_trampoline_ + ); + if (api_level_ < ANDROID_N + && symbols->artInterpreterToCompiledCodeBridge != nullptr) { + hooked_method.SetEntryPointFromInterpreterCode(symbols->artInterpreterToCompiledCodeBridge); + } + param->origin_native_method_ = env->FromReflectedMethod(origin_java_method); + param->hooked_native_method_ = hooked_jni_method; + param->addition_info_ = env->NewGlobalRef(addition_info); + param->hooked_method_ = env->NewGlobalRef(hooked_java_method); + param->origin_method_ = env->NewGlobalRef(origin_java_method); + + BuildJniClosure(param); + + hooked_method.SetEntryPointFromJni(param->jni_closure_->GetCode()); + param->decl_class_ = hooked_method.GetDeclaringClass(); + hooked_method_map_.insert(std::make_pair(hooked_jni_method, param)); + return reinterpret_cast(param); +} + +jobject +ArtRuntime::InvokeOriginalMethod(jlong slot, jobject this_object, jobjectArray args) { + JNIEnv *env = GetJniEnv(); + auto *param = reinterpret_cast(slot); + if (slot <= 0) { + env->ThrowNew( + WellKnownClasses::java_lang_IllegalArgumentException, + "Failed to resolve slot." 
+ ); + return nullptr; + } + ArtMethod hooked_method(param->hooked_native_method_); + ptr_t decl_class = hooked_method.GetDeclaringClass(); + if (param->decl_class_ != decl_class) { + pthread_mutex_lock(&mutex); + if (param->decl_class_ != decl_class) { + ScopedSuspendAll suspend_all; + LOG(INFO) + << "Notice: MovingGC cause the GcRoot References changed."; + jobject origin_java_method = hooked_method.Clone(env, param->origin_access_flags); + jmethodID origin_jni_method = env->FromReflectedMethod(origin_java_method); + ArtMethod origin_method(origin_jni_method); + origin_method.SetEntryPointFromQuickCompiledCode(param->origin_compiled_code_); + origin_method.SetEntryPointFromJni(param->origin_jni_code_); + origin_method.SetDexCodeItemOffset(param->origin_code_item_off); + param->origin_native_method_ = origin_jni_method; + env->DeleteGlobalRef(param->origin_method_); + param->origin_method_ = env->NewGlobalRef(origin_java_method); + param->decl_class_ = decl_class; + } + pthread_mutex_unlock(&mutex); + } + + jobject ret = env->CallNonvirtualObjectMethod( + param->origin_method_, + WellKnownClasses::java_lang_reflect_Method, + WellKnownClasses::java_lang_reflect_Method_invoke, + this_object, + args + ); + return ret; +} + +#if defined(__aarch64__) +# define __get_tls() ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; }) +#elif defined(__arm__) +# define __get_tls() ({ void** __val; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__val)); __val; }) +#elif defined(__i386__) +# define __get_tls() ({ void** __val; __asm__("movl %%gs:0, %0" : "=r"(__val)); __val; }) +#elif defined(__x86_64__) +# define __get_tls() ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; }) +#else +#error unsupported architecture +#endif + +ArtThread *ArtRuntime::GetCurrentArtThread() { + if (WellKnownClasses::java_lang_Thread_nativePeer) { + JNIEnv *env = GetJniEnv(); + jobject current = env->CallStaticObjectMethod( + WellKnownClasses::java_lang_Thread, + 
WellKnownClasses::java_lang_Thread_currentThread + ); + return reinterpret_cast( + env->GetLongField(current, WellKnownClasses::java_lang_Thread_nativePeer) + ); + } + return reinterpret_cast(__get_tls()[7/*TLS_SLOT_ART_THREAD_SELF*/]); +} + +jobject +ArtRuntime::InvokeHookedMethodBridge(JNIEnv *env, ArtHookParam *param, jobject receiver, + jobjectArray array) { + return env->CallStaticObjectMethod(java_class_, bridge_method_, + param->hooked_method_, reinterpret_cast(param), + param->addition_info_, receiver, array); +} + +jlong ArtRuntime::GetMethodSlot(JNIEnv *env, jclass cl, jobject method_obj) { + if (method_obj == nullptr) { + env->ThrowNew( + WellKnownClasses::java_lang_IllegalArgumentException, + "Method param == null" + ); + return 0; + } + jmethodID jni_method = env->FromReflectedMethod(method_obj); + auto entry = hooked_method_map_.find(jni_method); + if (entry == hooked_method_map_.end()) { + env->ThrowNew( + WellKnownClasses::java_lang_IllegalArgumentException, + "Failed to find slot." + ); + return 0; + } + return reinterpret_cast(entry->second); +} + +void ArtRuntime::EnsureClassInitialized(JNIEnv *env, jclass cl) { + // This invocation will ensure the target class has been initialized also. 
+ ScopedLocalRef unused(env, env->AllocObject(cl)); + JNIExceptionClear(env); +} + +void ArtRuntime::SetObjectClass(JNIEnv *env, jobject obj, jclass cl) { + SetObjectClassUnsafe(env, obj, cl); +} + +void ArtRuntime::SetObjectClassUnsafe(JNIEnv *env, jobject obj, jclass cl) { + jfieldID java_lang_Class_shadow$_klass_ = env->GetFieldID( + WellKnownClasses::java_lang_Object, + "shadow$_klass_", + "Ljava/lang/Class;" + ); + env->SetObjectField(obj, java_lang_Class_shadow$_klass_, cl); +} + +jobject ArtRuntime::CloneToSubclass(JNIEnv *env, jobject obj, jclass sub_class) { + ResolvedSymbols *symbols = GetSymbols(); + ArtThread *thread = GetCurrentArtThread(); + ptr_t art_object = symbols->Thread_DecodeJObject(thread, obj); + ptr_t art_clone_object = CloneArtObject(art_object); + jobject clone = symbols->JniEnvExt_NewLocalRef(env, art_clone_object); + SetObjectClassUnsafe(env, clone, sub_class); + return clone; +} + +void ArtRuntime::RemoveFinalFlag(JNIEnv *env, jclass java_class) { + jfieldID java_lang_Class_accessFlags = env->GetFieldID( + WellKnownClasses::java_lang_Class, + "accessFlags", + "I" + ); + jint access_flags = env->GetIntField(java_class, java_lang_Class_accessFlags); + env->SetIntField(java_class, java_lang_Class_accessFlags, access_flags & ~kAccFinal); +} + +bool ArtRuntime::EnforceDisableHiddenAPIPolicy() { + if (GetAndroidApiLevel() < ANDROID_O_MR1) { + return true; + } + static Singleton enforced([&](bool *result) { + *result = EnforceDisableHiddenAPIPolicyImpl(); + }); + return enforced.Get(); +} + +bool OnInvokeHiddenAPI() { + return false; +} + +/** + * NOTICE: + * After Android Q(10.0), GetMemberActionImpl has been renamed to ShouldDenyAccessToMemberImpl, + * But we don't know the symbols until it's published. 
+ */ +ALWAYS_INLINE bool ArtRuntime::EnforceDisableHiddenAPIPolicyImpl() { + JNIEnv *env = GetJniEnv(); + jfieldID java_lang_Class_shadow$_klass_ = env->GetFieldID( + WellKnownClasses::java_lang_Object, + "shadow$_klass_", + "Ljava/lang/Class;" + ); + JNIExceptionClear(env); + if (java_lang_Class_shadow$_klass_ != nullptr) { + return true; + } + void *symbol = nullptr; + + // Android P : Preview 1 ~ 4 version + symbol = WDynamicLibSymbol( + art_elf_image_, + "_ZN3art9hiddenapi25ShouldBlockAccessToMemberINS_8ArtFieldEEEbPT_PNS_6ThreadENSt3__18functionIFbS6_EEENS0_12AccessMethodE" + ); + if (symbol) { + WInlineHookFunction(symbol, reinterpret_cast(OnInvokeHiddenAPI), nullptr); + } + symbol = WDynamicLibSymbol( + art_elf_image_, + "_ZN3art9hiddenapi25ShouldBlockAccessToMemberINS_9ArtMethodEEEbPT_PNS_6ThreadENSt3__18functionIFbS6_EEENS0_12AccessMethodE" + ); + + if (symbol) { + WInlineHookFunction(symbol, reinterpret_cast(OnInvokeHiddenAPI), nullptr); + return true; + } + // Android P : Release version + symbol = WDynamicLibSymbol( + art_elf_image_, + "_ZN3art9hiddenapi6detail19GetMemberActionImplINS_8ArtFieldEEENS0_6ActionEPT_NS_20HiddenApiAccessFlags7ApiListES4_NS0_12AccessMethodE" + ); + if (symbol) { + WInlineHookFunction(symbol, reinterpret_cast(OnInvokeHiddenAPI), nullptr); + } + symbol = WDynamicLibSymbol( + art_elf_image_, + "_ZN3art9hiddenapi6detail19GetMemberActionImplINS_9ArtMethodEEENS0_6ActionEPT_NS_20HiddenApiAccessFlags7ApiListES4_NS0_12AccessMethodE" + ); + if (symbol) { + WInlineHookFunction(symbol, reinterpret_cast(OnInvokeHiddenAPI), nullptr); + } + return symbol != nullptr; +} + +ptr_t ArtRuntime::CloneArtObject(ptr_t art_object) { + ResolvedSymbols *symbols = GetSymbols(); + if (symbols->Object_Clone) { + return symbols->Object_Clone(art_object, GetCurrentArtThread()); + } + if (symbols->Object_CloneWithClass) { + return symbols->Object_CloneWithClass(art_object, GetCurrentArtThread(), nullptr); + } + return 
symbols->Object_CloneWithSize(art_object, GetCurrentArtThread(), 0); +} + +int (*old_ToDexPc)(void *thiz, void *a2, unsigned int a3, int a4); +int new_ToDexPc(void *thiz, void *a2, unsigned int a3, int a4) { + return old_ToDexPc(thiz, a2, a3, 0); +} + +bool is_hooked = false; +void ArtRuntime::FixBugN() { + if (is_hooked) + return; + void *symbol = nullptr; + symbol = WDynamicLibSymbol( + art_elf_image_, + "_ZNK3art20OatQuickMethodHeader7ToDexPcEPNS_9ArtMethodEjb" + ); + if (symbol) { + WInlineHookFunction(symbol, reinterpret_cast(new_ToDexPc), reinterpret_cast(&old_ToDexPc)); + } + is_hooked = true; +} + +} // namespace art +} // namespace whale diff --git a/module/src/main/cpp/whale/src/android/art/art_runtime.h b/module/src/main/cpp/whale/src/android/art/art_runtime.h new file mode 100644 index 00000000..55f47731 --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/art_runtime.h @@ -0,0 +1,153 @@ +#ifndef WHALE_ANDROID_ART_INTERCEPTOR_H_ +#define WHALE_ANDROID_ART_INTERCEPTOR_H_ + +#include +#include +#include "platform/linux/elf_image.h" +#include "platform/linux/process_map.h" +#include "dbi/instruction_set.h" +#include "android/art/art_symbol_resolver.h" +#include "android/art/art_hook_param.h" +#include "android/native_bridge.h" +#include "android/jni_helper.h" +#include "base/macros.h" +#include "base/primitive_types.h" + +#if defined(__LP64__) +static constexpr const char *kAndroidLibDir = "/system/lib64/"; +static constexpr const char *kLibNativeBridgePath = "/system/lib64/libnativebridge.so"; +static constexpr const char *kLibArtPath = "/system/lib64/libart.so"; +static constexpr const char *kLibAocPath = "/system/lib64/libaoc.so"; +static constexpr const char *kLibHoudiniArtPath = "/system/lib64/arm64/libart.so"; +#else +static constexpr const char *kAndroidLibDir = "/system/lib/"; +static constexpr const char *kLibArtPath = "/system/lib/libart.so"; +static constexpr const char *kLibAocPath = "/system/lib/libaoc.so"; +static constexpr const 
char *kLibHoudiniArtPath = "/system/lib/arm/libart.so"; +#endif + + +namespace whale { +namespace art { + +class ArtThread; + +struct ArtMethodOffsets final { + size_t method_size_; + offset_t jni_code_offset_; + offset_t quick_code_offset_; + offset_t OPTION interpreter_code_offset_; + offset_t access_flags_offset_; + offset_t dex_code_item_offset_offset_; + offset_t dex_method_index_offset_; + offset_t method_index_offset_; + offset_t OPTION hotness_count_offset_; +}; + +struct RuntimeObjects final { + ptr_t OPTION runtime_; + ptr_t OPTION heap_; + ptr_t OPTION thread_list_; + ptr_t OPTION class_linker_; + ptr_t OPTION intern_table_; +}; + +struct ClassLinkerObjects { + ptr_t quick_generic_jni_trampoline_; +}; + +class ArtRuntime final { + public: + friend class ArtMethod; + + static ArtRuntime *Get(); + + ArtRuntime() {} + + bool OnLoad(JavaVM *vm, JNIEnv *env, jclass java_class); + + jlong HookMethod(JNIEnv *env, jclass decl_class, jobject hooked_java_method, + jobject addition_info); + + JNIEnv *GetJniEnv() { + JNIEnv *env = nullptr; + jint ret = vm_->AttachCurrentThread(&env, nullptr); + DCHECK_EQ(JNI_OK, ret); + return env; + } + + ArtMethodOffsets *GetArtMethodOffsets() { + return &method_offset_; + } + + + RuntimeObjects *GetRuntimeObjects() { + return &runtime_objects_; + } + + ClassLinkerObjects *GetClassLinkerObjects() { + return &class_linker_objects_; + } + + ResolvedSymbols *GetSymbols() { + return art_symbol_resolver_.GetSymbols(); + } + + ArtThread *GetCurrentArtThread(); + + void EnsureClassInitialized(JNIEnv *env, jclass cl); + + + jobject + InvokeHookedMethodBridge(JNIEnv *env, ArtHookParam *param, jobject receiver, + jobjectArray array); + + jobject + InvokeOriginalMethod(jlong slot, jobject this_object, jobjectArray args); + + jlong GetMethodSlot(JNIEnv *env, jclass cl, jobject method_obj); + + ALWAYS_INLINE void VisitInterceptParams(std::function<void(ArtHookParam *)> visitor) { + for (auto &entry : hooked_method_map_) { + visitor(entry.second); + } + } + + void
SetObjectClass(JNIEnv *env, jobject obj, jclass cl); + + void SetObjectClassUnsafe(JNIEnv *env, jobject obj, jclass cl); + + jobject CloneToSubclass(JNIEnv *env, jobject obj, jclass sub_class); + + void RemoveFinalFlag(JNIEnv *env, jclass java_class); + + bool EnforceDisableHiddenAPIPolicy(); + + ptr_t CloneArtObject(ptr_t art_object); + + void FixBugN(); + + private: + JavaVM *vm_; + jclass java_class_; + jmethodID bridge_method_; + s4 api_level_; + void *art_elf_image_; + NativeBridgeCallbacks OPTION *android_bridge_callbacks_; + ArtSymbolResolver art_symbol_resolver_; + RuntimeObjects runtime_objects_; + ClassLinkerObjects class_linker_objects_; + ArtMethodOffsets method_offset_; + std::map<jmethodID, ArtHookParam *> hooked_method_map_; + pthread_mutex_t mutex; + + bool EnforceDisableHiddenAPIPolicyImpl(); + + DISALLOW_COPY_AND_ASSIGN(ArtRuntime); +}; + + +} // namespace art +} // namespace whale + +#endif // WHALE_ANDROID_ART_INTERCEPTOR_H_ diff --git a/module/src/main/cpp/whale/src/android/art/art_symbol_resolver.cc b/module/src/main/cpp/whale/src/android/art/art_symbol_resolver.cc new file mode 100644 index 00000000..ad2500cf --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/art_symbol_resolver.cc @@ -0,0 +1,118 @@ +#include "whale.h" +#include "android/art/art_symbol_resolver.h" +#include "android/android_build.h" +#include "android/art/art_runtime.h" + +#define SYMBOL static constexpr const char * + +namespace whale { +namespace art { + +// art::ArtMethod::CopyFrom(art::ArtMethod*, art::PointerSize) +SYMBOL kArtMethod_CopyFrom_O = "_ZN3art9ArtMethod8CopyFromEPS0_NS_11PointerSizeE"; +#if defined(__LP64__) +// art::ArtMethod::CopyFrom(art::ArtMethod*, unsigned long) +SYMBOL kArtMethod_CopyFrom_N = "_ZN3art9ArtMethod8CopyFromEPS0_m"; +// art::ArtMethod::CopyFrom(art::ArtMethod const*, unsigned long) +SYMBOL kArtMethod_CopyFrom_M = "_ZN3art9ArtMethod8CopyFromEPKS0_m"; +#else +// art::ArtMethod::CopyFrom(art::ArtMethod*, unsigned int) +SYMBOL kArtMethod_CopyFrom_N =
"_ZN3art9ArtMethod8CopyFromEPS0_j"; +// art::ArtMethod::CopyFrom(art::ArtMethod const*, unsigned int) +SYMBOL kArtMethod_CopyFrom_M = "_ZN3art9ArtMethod8CopyFromEPKS0_j"; +#endif + +// art::GetMethodShorty(_JNIEnv*, _jmethodID*) +SYMBOL kArt_GetMethodShorty = "_ZN3artL15GetMethodShortyEP7_JNIEnvP10_jmethodID"; +SYMBOL kArt_GetMethodShorty_Legacy = "_ZN3art15GetMethodShortyEP7_JNIEnvP10_jmethodID"; + +// art::Dbg::SuspendVM() +SYMBOL kDbg_SuspendVM = "_ZN3art3Dbg9SuspendVMEv"; +// art::Dbg::ResumeVM() +SYMBOL kDbg_ResumeVM = "_ZN3art3Dbg8ResumeVMEv"; + +// art_quick_to_interpreter_bridge() +SYMBOL kArt_art_quick_to_interpreter_bridge = "art_quick_to_interpreter_bridge"; + +// artInterpreterToCompiledCodeBridge +SYMBOL kArt_artInterpreterToCompiledCodeBridge = "artInterpreterToCompiledCodeBridge"; + +// art::ProfileSaver::ForceProcessProfiles() +SYMBOL kArt_ProfileSaver_ForceProcessProfiles = "_ZN3art12ProfileSaver20ForceProcessProfilesEv"; + +#if defined(__LP64__) +// art::LinearAlloc::Alloc(art::Thread*, unsigned int) +SYMBOL kArt_LinearAlloc_Alloc = "_ZN3art11LinearAlloc5AllocEPNS_6ThreadEm"; +#else +SYMBOL kArt_LinearAlloc_Alloc = "_ZN3art11LinearAlloc5AllocEPNS_6ThreadEj"; +#endif +// art::Thread::DecodeJObject(_jobject*) const +SYMBOL kArt_DecodeJObject = "_ZNK3art6Thread13DecodeJObjectEP8_jobject"; + +// art::ClassLinker::EnsureInitialized(art::Thread*, art::Handle, bool, bool) +SYMBOL kArt_EnsureInitialized = "_ZN3art11ClassLinker17EnsureInitializedEPNS_6ThreadENS_6HandleINS_6mirror5ClassEEEbb"; + +// art::mirror::Object::Clone(art::Thread*) +SYMBOL kArt_Object_Clone = "_ZN3art6mirror6Object5CloneEPNS_6ThreadE"; + +// art::mirror::Object::Clone(art::Thread*, art::mirror::Class*) +SYMBOL kArt_Object_CloneWithClass = "_ZN3art6mirror6Object5CloneEPNS_6ThreadEPNS0_5ClassE"; + +#if defined(__LP64__) +// art::mirror::Object::Clone(art::Thread*, unsigned long) +SYMBOL kArt_Object_CloneWithSize = "_ZN3art6mirror6Object5CloneEPNS_6ThreadEm"; +#else +// 
art::mirror::Object::Clone(art::Thread*, unsigned int) +SYMBOL kArt_Object_CloneWithSize = "_ZN3art6mirror6Object5CloneEPNS_6ThreadEj"; +#endif + +SYMBOL kArt_JniEnvExt_NewLocalRef = "_ZN3art9JNIEnvExt11NewLocalRefEPNS_6mirror6ObjectE"; + + +bool ArtSymbolResolver::Resolve(void *elf_image, s4 api_level) { +#define FIND_SYMBOL(symbol, decl, ret) \ + if ((decl = reinterpret_cast<typeof(decl)>(WDynamicLibSymbol(elf_image, symbol))) == nullptr) { \ + if (ret) { \ + LOG(ERROR) << "Failed to resolve symbol : " << #symbol; \ + return false; \ + } \ + } + FIND_SYMBOL(kArt_GetMethodShorty, symbols_.Art_GetMethodShorty, false); + if (symbols_.Art_GetMethodShorty == nullptr) { + FIND_SYMBOL(kArt_GetMethodShorty_Legacy, symbols_.Art_GetMethodShorty, false); + } + if (api_level < ANDROID_N) { + FIND_SYMBOL(kArt_artInterpreterToCompiledCodeBridge, + symbols_.artInterpreterToCompiledCodeBridge, false); + } + FIND_SYMBOL(kDbg_SuspendVM, symbols_.Dbg_SuspendVM, false); + FIND_SYMBOL(kDbg_ResumeVM, symbols_.Dbg_ResumeVM, false); + FIND_SYMBOL(kArt_art_quick_to_interpreter_bridge, symbols_.art_quick_to_interpreter_bridge, + false); + if (api_level > ANDROID_N) { + FIND_SYMBOL(kArt_ProfileSaver_ForceProcessProfiles, + symbols_.ProfileSaver_ForceProcessProfiles, + false); + } + if (api_level > ANDROID_O) { + FIND_SYMBOL(kArtMethod_CopyFrom_O, symbols_.ArtMethod_CopyFrom, false); + } else if (api_level > ANDROID_N) { + FIND_SYMBOL(kArtMethod_CopyFrom_N, symbols_.ArtMethod_CopyFrom, false); + } else { + FIND_SYMBOL(kArtMethod_CopyFrom_M, symbols_.ArtMethod_CopyFrom, false); + } + FIND_SYMBOL(kArt_Object_Clone, symbols_.Object_Clone, false); + if (symbols_.Object_Clone == nullptr) { + FIND_SYMBOL(kArt_Object_CloneWithSize, symbols_.Object_CloneWithSize, false); + } + if (symbols_.Object_Clone == nullptr) { + FIND_SYMBOL(kArt_Object_CloneWithClass, symbols_.Object_CloneWithClass, true); + } + FIND_SYMBOL(kArt_DecodeJObject, symbols_.Thread_DecodeJObject, true); + FIND_SYMBOL(kArt_JniEnvExt_NewLocalRef,
symbols_.JniEnvExt_NewLocalRef, true); + return true; +#undef FIND_SYMBOL +} + +} // namespace art +} // namespace whale diff --git a/module/src/main/cpp/whale/src/android/art/art_symbol_resolver.h b/module/src/main/cpp/whale/src/android/art/art_symbol_resolver.h new file mode 100644 index 00000000..da8a8607 --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/art_symbol_resolver.h @@ -0,0 +1,54 @@ +#ifndef WHALE_ANDROID_ART_SYMBOL_RESOLVER_H_ +#define WHALE_ANDROID_ART_SYMBOL_RESOLVER_H_ + +#include +#include "platform/linux/elf_image.h" +#include "base/primitive_types.h" + +namespace whale { +namespace art { + +struct ResolvedSymbols { + const char *(*Art_GetMethodShorty)(JNIEnv *, jmethodID); + + void (*Dbg_SuspendVM)(); + + void (*Dbg_ResumeVM)(); + + void *art_quick_to_interpreter_bridge; + void *artInterpreterToCompiledCodeBridge; + + void (*ProfileSaver_ForceProcessProfiles)(); + + void (*ArtMethod_CopyFrom)(jmethodID this_ptr, jmethodID from, size_t num_bytes); + + ptr_t (*Thread_DecodeJObject)(ptr_t thread, jobject obj); + + ptr_t (*Object_Clone)(ptr_t object_this, ptr_t thread); + + ptr_t (*Object_CloneWithClass)(ptr_t object_this, ptr_t thread, ptr_t cls); + + ptr_t (*Object_CloneWithSize)(ptr_t object_this, ptr_t thread, size_t num_bytes); + + jobject (*JniEnvExt_NewLocalRef)(JNIEnv *jnienv_ext_this, ptr_t art_object); + +}; + +class ArtSymbolResolver { + public: + ArtSymbolResolver() = default; + + bool Resolve(void *elf_image, s4 api_level); + + ResolvedSymbols *GetSymbols() { + return &symbols_; + }; + + private: + ResolvedSymbols symbols_; +}; + +} // namespace art +} // namespace whale + +#endif // WHALE_ANDROID_ART_SYMBOL_RESOLVER_H_ diff --git a/module/src/main/cpp/whale/src/android/art/java_types.cc b/module/src/main/cpp/whale/src/android/art/java_types.cc new file mode 100644 index 00000000..9d83d4bf --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/java_types.cc @@ -0,0 +1,99 @@ +#include "android/art/java_types.h" + 
+namespace whale { +namespace art { + +#define EXPORT_LANG_ClASS(c) jclass Types::java_lang_##c; jmethodID Types::java_lang_##c##_init; jmethodID Types::java_value_##c; + +EXPORT_LANG_ClASS(Integer); +EXPORT_LANG_ClASS(Long); +EXPORT_LANG_ClASS(Float); +EXPORT_LANG_ClASS(Double); +EXPORT_LANG_ClASS(Byte); +EXPORT_LANG_ClASS(Short); +EXPORT_LANG_ClASS(Boolean); +EXPORT_LANG_ClASS(Character); + +#undef EXPORT_LANG_ClASS + +void Types::Load(JNIEnv *env) { + jclass clazz; + env->PushLocalFrame(16); + +#define LOAD_CLASS(c, s) clazz = env->FindClass(s); c = reinterpret_cast<jclass>(env->NewWeakGlobalRef(clazz)) +#define LOAD_LANG_CLASS(c, s) LOAD_CLASS(java_lang_##c, "java/lang/" #c); java_lang_##c##_init = env->GetMethodID(java_lang_##c, "<init>", s) + + LOAD_LANG_CLASS(Integer, "(I)V"); + LOAD_LANG_CLASS(Long, "(J)V"); + LOAD_LANG_CLASS(Float, "(F)V"); + LOAD_LANG_CLASS(Double, "(D)V"); + LOAD_LANG_CLASS(Byte, "(B)V"); + LOAD_LANG_CLASS(Short, "(S)V"); + LOAD_LANG_CLASS(Boolean, "(Z)V"); + LOAD_LANG_CLASS(Character, "(C)V"); + +#undef LOAD_CLASS +#undef LOAD_LANG_CLASS + +#define LOAD_METHOD(k, c, r, s) java_value_##c = env->GetMethodID(k, r "Value", s) +#define LOAD_NUMBER(c, r, s) LOAD_METHOD(java_lang_Number, c, r, s) + + jclass java_lang_Number = env->FindClass("java/lang/Number"); + + LOAD_NUMBER(Integer, "int", "()I"); + LOAD_NUMBER(Long, "long", "()J"); + LOAD_NUMBER(Float, "float", "()F"); + LOAD_NUMBER(Double, "double", "()D"); + LOAD_NUMBER(Byte, "byte", "()B"); + LOAD_NUMBER(Short, "short", "()S"); + + LOAD_METHOD(java_lang_Boolean, Boolean, "boolean", "()Z"); + LOAD_METHOD(java_lang_Character, Character, "char", "()C"); + + env->PopLocalFrame(nullptr); +#undef LOAD_METHOD +#undef LOAD_NUMBER +} + + +#define LANG_BOX(c, t) jobject Types::To##c(JNIEnv *env, t v) { \ + return env->NewObject(Types::java_lang_##c, Types::java_lang_##c##_init, v); \ +} +#define LANG_UNBOX_V(k, c, t) t Types::From##c(JNIEnv *env, jobject j) { \ + return env->Call##k##Method(j,
Types::java_value_##c); \ +} +#define LANG_UNBOX(c, t) LANG_UNBOX_V(c, c, t) + +LANG_BOX(Integer, jint); +LANG_BOX(Long, jlong); +LANG_BOX(Float, jfloat); +LANG_BOX(Double, jdouble); +LANG_BOX(Byte, jbyte); +LANG_BOX(Short, jshort); +LANG_BOX(Boolean, jboolean); +LANG_BOX(Character, jchar); + +jobject Types::ToObject(JNIEnv *env, jobject obj) { + return obj; +} + +LANG_UNBOX_V(Int, Integer, jint); +LANG_UNBOX(Long, jlong); +LANG_UNBOX(Float, jfloat); +LANG_UNBOX(Double, jdouble); +LANG_UNBOX(Byte, jbyte); +LANG_UNBOX(Short, jshort); +LANG_UNBOX(Boolean, jboolean); +LANG_UNBOX_V(Char, Character, jchar); + +jobject Types::FromObject(JNIEnv *env, jobject obj) { + return obj; +} + + +#undef LANG_BOX +#undef LANG_UNBOX_V +#undef LANG_UNBOX + +} // namespace art +} // namespace whale diff --git a/module/src/main/cpp/whale/src/android/art/java_types.h b/module/src/main/cpp/whale/src/android/art/java_types.h new file mode 100644 index 00000000..830a58fb --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/java_types.h @@ -0,0 +1,61 @@ +#ifndef WHALE_ANDROID_ART_JAVA_TYPES_H_ +#define WHALE_ANDROID_ART_JAVA_TYPES_H_ + +#include +#include "android/art/art_runtime.h" + + +namespace whale { +namespace art { + +struct Types { +#define LANG_ClASS(c) static jclass java_lang_##c; static jmethodID java_lang_##c##_init; static jmethodID java_value_##c; + + LANG_ClASS(Integer); + LANG_ClASS(Long); + LANG_ClASS(Float); + LANG_ClASS(Double); + LANG_ClASS(Byte); + LANG_ClASS(Short); + LANG_ClASS(Boolean); + LANG_ClASS(Character); + +#undef LANG_ClASS + static void Load(JNIEnv *env); + + +#define LANG_BOX_DEF(c, t) static jobject To##c(JNIEnv *env, t v); + +#define LANG_UNBOX_V_DEF(k, c, t) static t From##c(JNIEnv *env, jobject j); + +#define LANG_UNBOX_DEF(c, t) LANG_UNBOX_V_DEF(c, c, t) + + LANG_BOX_DEF(Object, jobject); + LANG_BOX_DEF(Integer, jint); + LANG_BOX_DEF(Long, jlong); + LANG_BOX_DEF(Float, jfloat); + LANG_BOX_DEF(Double, jdouble); + LANG_BOX_DEF(Byte, jbyte); + 
LANG_BOX_DEF(Short, jshort); + LANG_BOX_DEF(Boolean, jboolean); + LANG_BOX_DEF(Character, jchar); + + LANG_UNBOX_V_DEF(Int, Integer, jint); + LANG_UNBOX_DEF(Object, jobject); + LANG_UNBOX_DEF(Long, jlong); + LANG_UNBOX_DEF(Float, jfloat); + LANG_UNBOX_DEF(Double, jdouble); + LANG_UNBOX_DEF(Byte, jbyte); + LANG_UNBOX_DEF(Short, jshort); + LANG_UNBOX_DEF(Boolean, jboolean); + LANG_UNBOX_V_DEF(Char, Character, jchar); + +#undef LANG_BOX_DEF +#undef LANG_UNBOX_V_DEF +#undef LANG_UNBOX_DEF +}; + +} // namespace art +} // namespace whale + +#endif // WHALE_ANDROID_ART_JAVA_TYPES_H_ diff --git a/module/src/main/cpp/whale/src/android/art/modifiers.h b/module/src/main/cpp/whale/src/android/art/modifiers.h new file mode 100644 index 00000000..8320104c --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/modifiers.h @@ -0,0 +1,58 @@ +#ifndef WHALE_ANDROID_ART_MODIFIERS_H_ +#define WHALE_ANDROID_ART_MODIFIERS_H_ + +#include "base/primitive_types.h" + +namespace whale { +namespace art { + +static constexpr u4 kAccPublic = 0x0001; // class, field, method, ic +static constexpr u4 kAccPrivate = 0x0002; // field, method, ic +static constexpr u4 kAccProtected = 0x0004; // field, method, ic +static constexpr u4 kAccStatic = 0x0008; // field, method, ic +static constexpr u4 kAccFinal = 0x0010; // class, field, method, ic +static constexpr u4 kAccSynchronized = 0x0020; // method (only allowed on natives) +static constexpr u4 kAccSuper = 0x0020; // class (not used in dex) +static constexpr u4 kAccVolatile = 0x0040; // field +static constexpr u4 kAccBridge = 0x0040; // method (1.5) +static constexpr u4 kAccTransient = 0x0080; // field +static constexpr u4 kAccVarargs = 0x0080; // method (1.5) +static constexpr u4 kAccNative = 0x0100; // method +static constexpr u4 kAccInterface = 0x0200; // class, ic +static constexpr u4 kAccAbstract = 0x0400; // class, method, ic +static constexpr u4 kAccStrict = 0x0800; // method +static constexpr u4 kAccSynthetic = 0x1000; // class, field, 
method, ic +static constexpr u4 kAccAnnotation = 0x2000; // class, ic (1.5) +static constexpr u4 kAccEnum = 0x4000; // class, field, ic (1.5) + +static constexpr u4 kAccJavaFlagsMask = 0xffff; // bits set from Java sources (low 16) + +static constexpr u4 kAccConstructor = 0x00010000; // method (dex only) <(cl)init> +static constexpr u4 kAccDeclaredSynchronized = 0x00020000; // method (dex only) +static constexpr u4 kAccClassIsProxy = 0x00040000; // class (dex only) +// Set to indicate that the ArtMethod is obsolete and has a different DexCache + DexFile from its +// declaring class. This flag may only be applied to methods. +static constexpr u4 kAccObsoleteMethod = 0x00040000; // method (runtime) + +static constexpr uint32_t kAccFastNative = 0x00080000u; // method (dex only) +static constexpr uint32_t kAccPreverified = kAccFastNative; // class (runtime) +static constexpr uint32_t kAccSkipAccessChecks = kAccPreverified; +static constexpr uint32_t kAccCriticalNative_P = 0x00200000; // method (runtime; native only) +// Android M only +static constexpr uint32_t kAccDontInline = 0x00400000u; // method (dex only) +// Android N or later. Set by the verifier for a method we do not want the compiler to compile. +static constexpr uint32_t kAccCompileDontBother_N = 0x01000000u; // method (runtime) +// Android O MR1 or later. Set by the verifier for a method we do not want the compiler to compile. +static constexpr uint32_t kAccCompileDontBother_O_MR1 = 0x02000000; // method (runtime) +// Set by the JIT when clearing profiling infos to denote that a method was previously warm. 
+static constexpr uint32_t kAccPreviouslyWarm_O_MR1 = 0x00800000; // method (runtime) + +static constexpr uint32_t kAccDirectFlags = kAccStatic | kAccPrivate | kAccConstructor; + +static constexpr uint32_t kAccPublicApi = 0x10000000; // field, method +static constexpr uint32_t kAccHiddenapiBits = 0x30000000; // field, method + +} // namespace art +} // namespace whale + +#endif // WHALE_ANDROID_ART_MODIFIERS_H_ diff --git a/module/src/main/cpp/whale/src/android/art/native_on_load.cc b/module/src/main/cpp/whale/src/android/art/native_on_load.cc new file mode 100644 index 00000000..f587d55e --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/native_on_load.cc @@ -0,0 +1,96 @@ +#include "android/jni_helper.h" +#include "android/art/art_runtime.h" +#include "base/logging.h" + +#define CLASS_NAME "com/lody/whale/WhaleRuntime" + +#ifndef WHALE_ANDROID_AUTO_LOAD +#define JNI_OnLoad Whale_OnLoad +#endif + + +extern "C" OPEN_API void WhaleRuntime_reserved0(JNI_START) {} + +extern "C" OPEN_API void WhaleRuntime_reserved1(JNI_START) {} + +static jlong +WhaleRuntime_hookMethodNative(JNI_START, jclass decl_class, jobject method_obj, + jobject addition_info) { + auto runtime = whale::art::ArtRuntime::Get(); + return runtime->HookMethod(env, decl_class, method_obj, addition_info); +} + +static jobject +WhaleRuntime_invokeOriginalMethodNative(JNI_START, jlong slot, jobject this_object, + jobjectArray args) { + auto runtime = whale::art::ArtRuntime::Get(); + return runtime->InvokeOriginalMethod(slot, this_object, args); +} + +static jlong +WhaleRuntime_getMethodSlot(JNI_START, jclass decl_class, jobject method_obj) { + auto runtime = whale::art::ArtRuntime::Get(); + return runtime->GetMethodSlot(env, decl_class, method_obj); +} + +static void +WhaleRuntime_setObjectClassNative(JNI_START, jobject obj, jclass parent_class) { + auto runtime = whale::art::ArtRuntime::Get(); + return runtime->SetObjectClass(env, obj, parent_class); +} + +static jobject 
+WhaleRuntime_cloneToSubclassNative(JNI_START, jobject obj, jclass sub_class) { + auto runtime = whale::art::ArtRuntime::Get(); + return runtime->CloneToSubclass(env, obj, sub_class); +} + +static void +WhaleRuntime_removeFinalFlagNative(JNI_START, jclass java_class) { + auto runtime = whale::art::ArtRuntime::Get(); + runtime->RemoveFinalFlag(env, java_class); +} + +void WhaleRuntime_enforceDisableHiddenAPIPolicy(JNI_START) { + auto runtime = whale::art::ArtRuntime::Get(); + runtime->EnforceDisableHiddenAPIPolicy(); +} + + +static JNINativeMethod gMethods[] = { + NATIVE_METHOD(WhaleRuntime, hookMethodNative, + "(Ljava/lang/Class;Ljava/lang/reflect/Member;Ljava/lang/Object;)J"), + NATIVE_METHOD(WhaleRuntime, invokeOriginalMethodNative, + "(JLjava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"), + NATIVE_METHOD(WhaleRuntime, getMethodSlot, "(Ljava/lang/reflect/Member;)J"), + NATIVE_METHOD(WhaleRuntime, setObjectClassNative, "(Ljava/lang/Object;Ljava/lang/Class;)V"), + NATIVE_METHOD(WhaleRuntime, cloneToSubclassNative, + "(Ljava/lang/Object;Ljava/lang/Class;)Ljava/lang/Object;"), + NATIVE_METHOD(WhaleRuntime, removeFinalFlagNative, + "(Ljava/lang/Class;)V"), + NATIVE_METHOD(WhaleRuntime, enforceDisableHiddenAPIPolicy, "()V"), + NATIVE_METHOD(WhaleRuntime, reserved0, "()V"), + NATIVE_METHOD(WhaleRuntime, reserved1, "()V") +}; + +JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void *reserved) { + JNIEnv *env = nullptr; + if (vm->GetEnv(reinterpret_cast(&env), JNI_VERSION_1_4) != JNI_OK) { + return -1; + } + jclass cl = env->FindClass(CLASS_NAME); + if (cl == nullptr) { + LOG(ERROR) << "FindClass failed for " << CLASS_NAME; + return JNI_ERR; + } + if (env->RegisterNatives(cl, gMethods, NELEM(gMethods)) < 0) { + LOG(ERROR) << "RegisterNatives failed for " << CLASS_NAME; + return JNI_ERR; + } + auto runtime = whale::art::ArtRuntime::Get(); + if (!runtime->OnLoad(vm, env, cl)) { + LOG(ERROR) << "Runtime setup failed"; + return JNI_ERR; + } + return JNI_VERSION_1_4; +} 
diff --git a/module/src/main/cpp/whale/src/android/art/native_on_load.h b/module/src/main/cpp/whale/src/android/art/native_on_load.h new file mode 100644 index 00000000..4b2a453f --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/native_on_load.h @@ -0,0 +1,25 @@ +#ifndef WHALE_ANDROID_ART_NATIVE_ON_LOAD_H_ +#define WHALE_ANDROID_ART_NATIVE_ON_LOAD_H_ + +#include + +constexpr const char *kMethodReserved0 = "reserved0"; +constexpr const char *kMethodReserved1 = "reserved1"; + +/** + * DO NOT rename the following function + */ +extern "C" { + +void WhaleRuntime_reserved0(JNIEnv *env, jclass cl); + +void WhaleRuntime_reserved1(JNIEnv *env, jclass cl); + +} + +#ifndef WHALE_ANDROID_AUTO_LOAD +JNIEXPORT jint JNICALL Whale_OnLoad(JavaVM *vm, void *reserved); +#endif + + +#endif // WHALE_ANDROID_ART_NATIVE_ON_LOAD_H_ diff --git a/module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.cc b/module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.cc new file mode 100644 index 00000000..32a502a4 --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.cc @@ -0,0 +1,76 @@ +#include "android/jni_helper.h" +#include "android/android_build.h" +#include "android/art/scoped_thread_state_change.h" +#include "art_runtime.h" + +namespace whale { +namespace art { + +static volatile long kNoGCDaemonsGuard = 0; + +jclass ScopedNoGCDaemons::java_lang_Daemons; +jmethodID ScopedNoGCDaemons::java_lang_Daemons_start; +jmethodID ScopedNoGCDaemons::java_lang_Daemons_stop; + +void ScopedNoGCDaemons::Load(JNIEnv *env) { + java_lang_Daemons = reinterpret_cast(env->NewGlobalRef( + env->FindClass("java/lang/Daemons"))); + if (java_lang_Daemons == nullptr) { + JNIExceptionClear(env); + LOG(WARNING) << "java/lang/Daemons API is unavailable."; + return; + } + java_lang_Daemons_start = env->GetStaticMethodID(java_lang_Daemons, "start", "()V"); + if (java_lang_Daemons_start == nullptr) { + JNIExceptionClear(env); + 
java_lang_Daemons_start = env->GetStaticMethodID(java_lang_Daemons, "startPostZygoteFork", + "()V"); + } + if (java_lang_Daemons_start == nullptr) { + LOG(WARNING) + << "java/lang/Daemons API is available but no start/startPostZygoteFork method."; + JNIExceptionClear(env); + } + java_lang_Daemons_stop = env->GetStaticMethodID(java_lang_Daemons, "stop", "()V"); + JNIExceptionClear(env); +} + +ScopedNoGCDaemons::ScopedNoGCDaemons(JNIEnv *env) : env_(env) { + if (java_lang_Daemons_start != nullptr) { + if (__sync_sub_and_fetch(&kNoGCDaemonsGuard, 1) <= 0) { + env_->CallStaticVoidMethod(java_lang_Daemons, java_lang_Daemons_stop); + JNIExceptionClear(env_); + } + } +} + + +ScopedNoGCDaemons::~ScopedNoGCDaemons() { + if (java_lang_Daemons_stop != nullptr) { + if (__sync_add_and_fetch(&kNoGCDaemonsGuard, 1) == 1) { + env_->CallStaticVoidMethod(java_lang_Daemons, java_lang_Daemons_start); + JNIExceptionClear(env_); + } + } +} + +ScopedSuspendAll::ScopedSuspendAll() { + ResolvedSymbols *symbols = art::ArtRuntime::Get()->GetSymbols(); + if (symbols->Dbg_SuspendVM && symbols->Dbg_ResumeVM) { + symbols->Dbg_SuspendVM(); + } else { + LOG(WARNING) << "Suspend VM API is unavailable."; + } +} + +ScopedSuspendAll::~ScopedSuspendAll() { + ResolvedSymbols *symbols = art::ArtRuntime::Get()->GetSymbols(); + if (symbols->Dbg_SuspendVM && symbols->Dbg_ResumeVM) { + symbols->Dbg_ResumeVM(); + } +} + + +} // namespace art +} // namespace whale + diff --git a/module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.h b/module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.h new file mode 100644 index 00000000..9fe2bc18 --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/scoped_thread_state_change.h @@ -0,0 +1,37 @@ +#ifndef WHALE_ANDROID_ART__SCOPED_THREAD_STATE_CHANGE_H_ +#define WHALE_ANDROID_ART__SCOPED_THREAD_STATE_CHANGE_H_ + +#include + +namespace whale { +namespace art { + +class ScopedNoGCDaemons { + static jclass java_lang_Daemons; + static 
jmethodID java_lang_Daemons_start; + static jmethodID java_lang_Daemons_stop; + + public: + static void Load(JNIEnv *env); + + ScopedNoGCDaemons(JNIEnv *env); + + ~ScopedNoGCDaemons(); + + private: + JNIEnv *env_; +}; + +class ScopedSuspendAll { + public: + ScopedSuspendAll(); + + ~ScopedSuspendAll(); +}; + + +} // namespace art +} // namespace whale + +#endif // WHALE_ANDROID_ART__SCOPED_THREAD_STATE_CHANGE_H_ + diff --git a/module/src/main/cpp/whale/src/android/art/well_known_classes.cc b/module/src/main/cpp/whale/src/android/art/well_known_classes.cc new file mode 100644 index 00000000..7b4ffc6c --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/well_known_classes.cc @@ -0,0 +1,101 @@ +#include "android/art/well_known_classes.h" +#include "android/jni_helper.h" +#include "base/logging.h" +#include "base/stringprintf.h" + +namespace whale { +namespace art { + +jclass WellKnownClasses::java_lang_Object; +jclass WellKnownClasses::java_lang_reflect_Method; +jclass WellKnownClasses::java_lang_Class; +jclass WellKnownClasses::java_lang_ClassLoader; +jclass WellKnownClasses::java_lang_reflect_AccessibleObject; +jclass WellKnownClasses::java_lang_Thread; +jclass WellKnownClasses::java_lang_IllegalArgumentException; + +jmethodID WellKnownClasses::java_lang_reflect_Method_invoke; +jmethodID WellKnownClasses::java_lang_Class_getClassLoader; +jmethodID WellKnownClasses::java_lang_reflect_AccessibleObject_setAccessible; +jmethodID WellKnownClasses::java_lang_Thread_currentThread; + +jfieldID WellKnownClasses::java_lang_Thread_nativePeer; + +static jclass CacheClass(JNIEnv *env, const char *jni_class_name) { + ScopedLocalRef c(env, env->FindClass(jni_class_name)); + if (c.get() == nullptr) { + LOG(FATAL) << "Couldn't find class: " << jni_class_name; + } + return reinterpret_cast(env->NewGlobalRef(c.get())); +} + +static jmethodID CacheMethod(JNIEnv *env, jclass c, bool is_static, + const char *name, const char *signature) { + jmethodID mid = is_static ? 
env->GetStaticMethodID(c, name, signature) : + env->GetMethodID(c, name, signature); + if (mid == nullptr) { + LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature; + if (env->ExceptionCheck()) { + env->ExceptionOccurred(); + } + } + return mid; +} + +static jfieldID CacheField(JNIEnv *env, jclass c, bool is_static, + const char *name, const char *signature, bool weak = false) { + jfieldID fid = is_static ? env->GetStaticFieldID(c, name, signature) : + env->GetFieldID(c, name, signature); + if (fid == nullptr) { + if (env->ExceptionCheck()) { + env->ExceptionClear(); + } + if (weak) { + LOG(FATAL) << "Couldn't find field \"" << name << "\" with signature \"" << signature; + } + } + return fid; +} + +static jmethodID CacheMethod(JNIEnv *env, const char *klass, bool is_static, + const char *name, const char *signature) { + ScopedLocalRef java_class(env, env->FindClass(klass)); + return CacheMethod(env, java_class.get(), is_static, name, signature); +} + +static jmethodID CachePrimitiveBoxingMethod(JNIEnv *env, char prim_name, const char *boxed_name) { + ScopedLocalRef boxed_class(env, env->FindClass(boxed_name)); + return CacheMethod(env, boxed_class.get(), true, "valueOf", + StringPrintf("(%c)L%s;", prim_name, boxed_name).c_str()); +} + + +void WellKnownClasses::Load(JNIEnv *env) { + java_lang_Object = CacheClass(env, "java/lang/Object"); + java_lang_reflect_Method = CacheClass(env, "java/lang/reflect/Method"); + java_lang_Class = CacheClass(env, "java/lang/Class"); + java_lang_ClassLoader = CacheClass(env, "java/lang/ClassLoader"); + java_lang_reflect_AccessibleObject = CacheClass(env, "java/lang/reflect/AccessibleObject"); + java_lang_Thread = CacheClass(env, "java/lang/Thread"); + java_lang_IllegalArgumentException = CacheClass(env, "java/lang/IllegalArgumentException"); + + java_lang_Thread_currentThread = CacheMethod(env, java_lang_Thread, true, "currentThread", + "()Ljava/lang/Thread;"); + java_lang_reflect_Method_invoke = 
CacheMethod(env, java_lang_reflect_Method, false, "invoke", + "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"); + java_lang_Class_getClassLoader = CacheMethod(env, java_lang_Class, + false, + "getClassLoader", + "()Ljava/lang/ClassLoader;"); + java_lang_reflect_AccessibleObject_setAccessible = CacheMethod(env, + java_lang_reflect_AccessibleObject, + false, + "setAccessible", + "(Z)V"); + + java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J", true); +} + + +} // namespace art +} // namespace whale diff --git a/module/src/main/cpp/whale/src/android/art/well_known_classes.h b/module/src/main/cpp/whale/src/android/art/well_known_classes.h new file mode 100644 index 00000000..14a1729c --- /dev/null +++ b/module/src/main/cpp/whale/src/android/art/well_known_classes.h @@ -0,0 +1,31 @@ +#ifndef WHALE_ANDROID_ART_WELL_KNOWN_CLASSES_H_ +#define WHALE_ANDROID_ART_WELL_KNOWN_CLASSES_H_ + +#include + +namespace whale { +namespace art { + +struct WellKnownClasses { + static void Load(JNIEnv *env); + + static jclass java_lang_Object; + static jclass java_lang_reflect_Method; + static jclass java_lang_Class; + static jclass java_lang_ClassLoader; + static jclass java_lang_reflect_AccessibleObject; + static jclass java_lang_Thread; + static jclass java_lang_IllegalArgumentException; + + static jmethodID java_lang_reflect_Method_invoke; + static jmethodID java_lang_Class_getClassLoader; + static jmethodID java_lang_reflect_AccessibleObject_setAccessible; + static jmethodID java_lang_Thread_currentThread; + + static jfieldID java_lang_Thread_nativePeer; +}; + +} // namespace art +} // namespace whale + +#endif // WHALE_ANDROID_ART_WELL_KNOWN_CLASSES_H_ diff --git a/module/src/main/cpp/whale/src/android/jni_helper.h b/module/src/main/cpp/whale/src/android/jni_helper.h new file mode 100644 index 00000000..b2c6545a --- /dev/null +++ b/module/src/main/cpp/whale/src/android/jni_helper.h @@ -0,0 +1,73 @@ +#ifndef 
WHALE_ANDROID_ART_JNI_HELPER_H_ +#define WHALE_ANDROID_ART_JNI_HELPER_H_ + +#include <jni.h> +#include <base/macros.h> + +#define NATIVE_METHOD(className, functionName, signature) \ + { #functionName, signature, reinterpret_cast<void *>(className ## _ ## functionName) } + +#define NELEM(x) (sizeof(x)/sizeof((x)[0])) + +#define JNI_START JNIEnv *env, jclass cl + +static inline void JNIExceptionClear(JNIEnv *env) { + if (env->ExceptionCheck()) { + env->ExceptionClear(); + } +} + +static inline bool JNIExceptionCheck(JNIEnv *env) { + if (env->ExceptionCheck()) { + jthrowable e = env->ExceptionOccurred(); + env->Throw(e); + env->DeleteLocalRef(e); + return true; + } + return false; +} + +static inline void JNIExceptionClearAndDescribe(JNIEnv *env) { + if (env->ExceptionCheck()) { + env->ExceptionDescribe(); + env->ExceptionClear(); + } +} + +template <typename T> +class ScopedLocalRef { + public: + ScopedLocalRef(JNIEnv *env, T localRef) : mEnv(env), mLocalRef(localRef) { + } + + ~ScopedLocalRef() { + reset(); + } + + void reset(T ptr = nullptr) { + if (ptr != mLocalRef) { + if (mLocalRef != nullptr) { + mEnv->DeleteLocalRef(mLocalRef); + } + mLocalRef = ptr; + } + } + + T release() { + T localRef = mLocalRef; + mLocalRef = nullptr; + return localRef; + } + + T get() const { + return mLocalRef; + } + + private: + JNIEnv *const mEnv; + T mLocalRef; + + DISALLOW_COPY_AND_ASSIGN(ScopedLocalRef); +}; + +#endif // WHALE_ANDROID_ART_JNI_HELPER_H_ diff --git a/module/src/main/cpp/whale/src/android/native_bridge.h b/module/src/main/cpp/whale/src/android/native_bridge.h new file mode 100644 index 00000000..385de37b --- /dev/null +++ b/module/src/main/cpp/whale/src/android/native_bridge.h @@ -0,0 +1,131 @@ +#ifndef WHALE_ANDROID_NATIVE_BRIDGE_H_ +#define WHALE_ANDROID_NATIVE_BRIDGE_H_ + +#include <jni.h> +#include <signal.h> +#include <stdint.h> + +struct NativeBridgeRuntimeCallbacks; +struct NativeBridgeRuntimeValues; + +// Function pointer type for sigaction. This is mostly the signature of a signal handler, except +// for the return type.
The runtime needs to know whether the signal was handled or should be given +// to the chain. +typedef bool (*NativeBridgeSignalHandlerFn)(int, siginfo_t *, void *); + +// Runtime interfaces to native bridge. +struct NativeBridgeRuntimeCallbacks { + // Get shorty of a Java method. The shorty is supposed to be persistent in memory. + // + // Parameters: + // env [IN] pointer to JNIenv. + // mid [IN] Java methodID. + // Returns: + // short descriptor for method. + const char *(*getMethodShorty)(JNIEnv *env, jmethodID mid); + + // Get number of native methods for specified class. + // + // Parameters: + // env [IN] pointer to JNIenv. + // clazz [IN] Java class object. + // Returns: + // number of native methods. + uint32_t (*getNativeMethodCount)(JNIEnv *env, jclass clazz); + + // Get at most 'method_count' native methods for specified class 'clazz'. Results are outputed + // via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the method shorty. + // + // Parameters: + // env [IN] pointer to JNIenv. + // clazz [IN] Java class object. + // methods [OUT] array of method with the name, shorty, and fnPtr. + // method_count [IN] max number of elements in methods. + // Returns: + // number of method it actually wrote to methods. + uint32_t (*getNativeMethods)(JNIEnv *env, jclass clazz, JNINativeMethod *methods, + uint32_t method_count); +}; + +// Native bridge interfaces to runtime. +struct NativeBridgeCallbacks { + // Version number of the interface. + uint32_t version; + + // Initialize native bridge. Native bridge's internal implementation must ensure MT safety and + // that the native bridge is initialized only once. Thus it is OK to call this interface for an + // already initialized native bridge. + // + // Parameters: + // runtime_cbs [IN] the pointer to NativeBridgeRuntimeCallbacks. + // Returns: + // true iff initialization was successful. 
+ bool (*initialize)(const NativeBridgeRuntimeCallbacks *runtime_cbs, const char *private_dir, + const char *instruction_set); + + // Load a shared library that is supported by the native bridge. + // + // Parameters: + // libpath [IN] path to the shared library + // flag [IN] the stardard RTLD_XXX defined in bionic dlfcn.h + // Returns: + // The opaque handle of the shared library if sucessful, otherwise NULL + void *(*loadLibrary)(const char *libpath, int flag); + + // Get a native bridge trampoline for specified native method. The trampoline has same + // sigature as the native method. + // + // Parameters: + // handle [IN] the handle returned from loadLibrary + // shorty [IN] short descriptor of native method + // len [IN] length of shorty + // Returns: + // address of trampoline if successful, otherwise NULL + void *(*getTrampoline)(void *handle, const char *name, const char *shorty, uint32_t len); + + // Check whether native library is valid and is for an ABI that is supported by native bridge. + // + // Parameters: + // libpath [IN] path to the shared library + // Returns: + // TRUE if library is supported by native bridge, FALSE otherwise + bool (*isSupported)(const char *libpath); + + // Provide environment values required by the app running with native bridge according to the + // instruction set. + // + // Parameters: + // instruction_set [IN] the instruction set of the app + // Returns: + // NULL if not supported by native bridge. + // Otherwise, return all environment values to be set after fork. + const struct NativeBridgeRuntimeValues *(*getAppEnv)(const char *instruction_set); + + // Added callbacks in version 2. + + // Check whether the bridge is compatible with the given version. A bridge may decide not to be + // forwards- or backwards-compatible, and libnativebridge will then stop using it. + // + // Parameters: + // bridge_version [IN] the version of libnativebridge. 
+ // Returns: + // true iff the native bridge supports the given version of libnativebridge. + bool (*isCompatibleWith)(uint32_t bridge_version); + + // A callback to retrieve a native bridge's signal handler for the specified signal. The runtime + // will ensure that the signal handler is being called after the runtime's own handler, but before + // all chained handlers. The native bridge should not try to install the handler by itself, as + // that will potentially lead to cycles. + // + // Parameters: + // signal [IN] the signal for which the handler is asked for. Currently, only SIGSEGV is + // supported by the runtime. + // Returns: + // NULL if the native bridge doesn't use a handler or doesn't want it to be managed by the + // runtime. + // Otherwise, a pointer to the signal handler. + NativeBridgeSignalHandlerFn (*getSignalHandler)(int signal); +}; + + +#endif // WHALE_ANDROID_NATIVE_BRIDGE_H_ diff --git a/module/src/main/cpp/whale/src/assembler/assembler.cc b/module/src/main/cpp/whale/src/assembler/assembler.cc new file mode 100644 index 00000000..4d5ba35c --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/assembler.cc @@ -0,0 +1,64 @@ +#include "assembler/assembler.h" + +namespace whale { + + +AssemblerBuffer::AssemblerBuffer() { + static const size_t kInitialBufferCapacity = 4 * KB; + contents_ = reinterpret_cast(malloc(kInitialBufferCapacity)); + cursor_ = contents_; + limit_ = ComputeLimit(contents_, kInitialBufferCapacity); + fixup_ = nullptr; + slow_path_ = nullptr; + has_ensured_capacity_ = false; + fixups_processed_ = false; + // Verify internal state. 
+ CHECK_EQ(Capacity(), kInitialBufferCapacity); + CHECK_EQ(Size(), 0U); +} + + +AssemblerBuffer::~AssemblerBuffer() { + free(contents_); +} + + +void AssemblerBuffer::ProcessFixups(const MemoryRegion ®ion) { + AssemblerFixup *fixup = fixup_; + while (fixup != nullptr) { + fixup->Process(region, fixup->position()); + fixup = fixup->previous(); + } +} + + +void AssemblerBuffer::FinalizeInstructions(const MemoryRegion &instructions) { + // Copy the instructions from the buffer. + MemoryRegion from(reinterpret_cast(contents()), Size()); + instructions.CopyFrom(0, from); + // Process fixups in the instructions. + ProcessFixups(instructions); + fixups_processed_ = true; +} + + +void AssemblerBuffer::ExtendCapacity(size_t min_capacity) { + size_t old_size = Size(); + size_t old_capacity = Capacity(); + DCHECK_GT(min_capacity, old_capacity); + size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB); + new_capacity = std::max(new_capacity, min_capacity); + + // Allocate the new data area and copy contents of the old one to it. + contents_ = reinterpret_cast(realloc(contents_, new_capacity)); + + // Update the cursor and recompute the limit. + cursor_ = contents_ + old_size; + limit_ = ComputeLimit(contents_, new_capacity); + + // Verify internal state. + CHECK_EQ(Capacity(), new_capacity); + CHECK_EQ(Size(), old_size); +} + +} // namespace whale diff --git a/module/src/main/cpp/whale/src/assembler/assembler.h b/module/src/main/cpp/whale/src/assembler/assembler.h new file mode 100644 index 00000000..ed885f12 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/assembler.h @@ -0,0 +1,284 @@ +#ifndef WHALE_ASSEMBLER_ASSEMBLER_H_ +#define WHALE_ASSEMBLER_ASSEMBLER_H_ + +#include "assembler/label.h" +#include "assembler/memory_region.h" + +namespace whale { + +class Assembler; + +class AssemblerBuffer; + +class Label; + + +// Assembler fixups are positions in generated code that require processing +// after the code has been copied to executable memory. 
This includes building +// relocation information. +class AssemblerFixup { + public: + virtual void Process(const MemoryRegion ®ion, int position) = 0; + + virtual ~AssemblerFixup() = default; + + private: + AssemblerFixup *previous_; + int position_; + + AssemblerFixup *previous() const { return previous_; } + + void set_previous(AssemblerFixup *previous_in) { previous_ = previous_in; } + + int position() const { return position_; } + + void set_position(int position_in) { position_ = position_in; } + + friend class AssemblerBuffer; +}; + +class SlowPath { + public: + SlowPath() : next_(nullptr) {} + + virtual ~SlowPath() {} + + Label *Continuation() { return &continuation_; } + + Label *Entry() { return &entry_; } + + // Generate code for slow path + virtual void Emit(Assembler *sp_asm) = 0; + + protected: + // Entry branched to by fast path + Label entry_; + // Optional continuation that is branched to at the end of the slow path + Label continuation_; + // Next in linked list of slow paths + SlowPath *next_; + + private: + friend class AssemblerBuffer; + DISALLOW_COPY_AND_ASSIGN(SlowPath); +}; + +class AssemblerBuffer { + public: + explicit AssemblerBuffer(); + + ~AssemblerBuffer(); + + // Basic support for emitting, loading, and storing. + template + void Emit(T value) { + CHECK(HasEnsuredCapacity()); + *reinterpret_cast(cursor_) = value; + cursor_ += sizeof(T); + } + + template + T Load(size_t position) { + CHECK_LE(position, Size() - static_cast(sizeof(T))); + return *reinterpret_cast(contents_ + position); + } + + template + void Store(size_t position, T value) { + CHECK_LE(position, Size() - static_cast(sizeof(T))); + *reinterpret_cast(contents_ + position) = value; + } + + void Resize(size_t new_size) { + if (new_size > Capacity()) { + ExtendCapacity(new_size); + } + cursor_ = contents_ + new_size; + } + + void Move(size_t newposition, size_t oldposition, size_t size) { + // Move a chunk of the buffer from oldposition to newposition. 
+ DCHECK_LE(oldposition + size, Size()); + DCHECK_LE(newposition + size, Size()); + memmove(contents_ + newposition, contents_ + oldposition, size); + } + + // Emit a fixup at the current location. + void EmitFixup(AssemblerFixup *fixup) { + fixup->set_previous(fixup_); + fixup->set_position(Size()); + fixup_ = fixup; + } + + void EnqueueSlowPath(SlowPath *slowpath) { + if (slow_path_ == nullptr) { + slow_path_ = slowpath; + } else { + SlowPath *cur = slow_path_; + for (; cur->next_ != nullptr; cur = cur->next_) {} + cur->next_ = slowpath; + } + } + + void EmitSlowPaths(Assembler *sp_asm) { + SlowPath *cur = slow_path_; + SlowPath *next = nullptr; + slow_path_ = nullptr; + for (; cur != nullptr; cur = next) { + cur->Emit(sp_asm); + next = cur->next_; + delete cur; + } + } + + // Get the size of the emitted code. + size_t Size() const { + CHECK_GE(cursor_, contents_); + return cursor_ - contents_; + } + + uint8_t *contents() const { return contents_; } + + // Copy the assembled instructions into the specified memory block + // and apply all fixups. + void FinalizeInstructions(const MemoryRegion ®ion); + + // To emit an instruction to the assembler buffer, the EnsureCapacity helper + // must be used to guarantee that the underlying data area is big enough to + // hold the emitted instruction. Usage: + // + // AssemblerBuffer buffer; + // AssemblerBuffer::EnsureCapacity ensured(&buffer); + // ... emit bytes for single instruction ... + + class EnsureCapacity { + public: + explicit EnsureCapacity(AssemblerBuffer *buffer) { + if (buffer->cursor() > buffer->limit()) { + buffer->ExtendCapacity(buffer->Size() + kMinimumGap); + } + // In debug mode, we save the assembler buffer along with the gap + // size before we start emitting to the buffer. This allows us to + // check that any single generated instruction doesn't overflow the + // limit implied by the minimum gap size. 
+ buffer_ = buffer; + gap_ = ComputeGap(); + // Make sure that extending the capacity leaves a big enough gap + // for any kind of instruction. + CHECK_GE(gap_, kMinimumGap); + // Mark the buffer as having ensured the capacity. + CHECK(!buffer->HasEnsuredCapacity()); // Cannot nest. + buffer->has_ensured_capacity_ = true; + } + + ~EnsureCapacity() { + // Unmark the buffer, so we cannot emit after this. + buffer_->has_ensured_capacity_ = false; + // Make sure the generated instruction doesn't take up more + // space than the minimum gap. + size_t delta = gap_ - ComputeGap(); + CHECK_LE(delta, kMinimumGap); + } + + private: + AssemblerBuffer *buffer_; + size_t gap_; + + size_t ComputeGap() { return buffer_->Capacity() - buffer_->Size(); } + }; + + bool has_ensured_capacity_; + + bool HasEnsuredCapacity() const { return has_ensured_capacity_; } + + // Returns the position in the instruction stream. + size_t GetPosition() { return cursor_ - contents_; } + + size_t Capacity() const { + CHECK_GE(limit_, contents_); + return (limit_ - contents_) + kMinimumGap; + } + + // Unconditionally increase the capacity. + // The provided `min_capacity` must be higher than current `Capacity()`. + void ExtendCapacity(size_t min_capacity); + + private: + // The limit is set to kMinimumGap bytes before the end of the data area. + // This leaves enough space for the longest possible instruction and allows + // for a single, fast space check per instruction. + static const int kMinimumGap = 32; + + uint8_t *contents_; + uint8_t *cursor_; + uint8_t *limit_; + AssemblerFixup *fixup_; + bool fixups_processed_; + + // Head of linked list of slow paths + SlowPath *slow_path_; + + uint8_t *cursor() const { return cursor_; } + + uint8_t *limit() const { return limit_; } + + // Process the fixup chain starting at the given fixup. The offset is + // non-zero for fixups in the body if the preamble is non-empty. 
+ void ProcessFixups(const MemoryRegion ®ion); + + // Compute the limit based on the data area and the capacity. See + // description of kMinimumGap for the reasoning behind the value. + static uint8_t *ComputeLimit(uint8_t *data, size_t capacity) { + return data + capacity - kMinimumGap; + } + + friend class AssemblerFixup; +}; + +class Assembler { + public: + // Finalize the code; emit slow paths, fixup branches, add literal pool, etc. + virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); } + + // Size of generated code + virtual size_t CodeSize() const { return buffer_.Size(); } + + virtual const uint8_t *CodeBufferBaseAddress() const { return buffer_.contents(); } + + // CodePosition() is a non-const method similar to CodeSize(), which is used to + // record positions within the code buffer for the purpose of signal handling + // (stack overflow checks and implicit null checks may trigger signals and the + // signal handlers expect them right before the recorded positions). + // On most architectures CodePosition() should be equivalent to CodeSize(), but + // the MIPS assembler needs to be aware of this recording, so it doesn't put + // the instructions that can trigger signals into branch delay slots. Handling + // signals from instructions in delay slots is a bit problematic and should be + // avoided. + virtual size_t CodePosition() { return CodeSize(); } + + // Copy instructions out of assembly buffer into the given region of memory + virtual void FinalizeInstructions(const MemoryRegion ®ion) { + buffer_.FinalizeInstructions(region); + } + + virtual void Comment(const char *format, ...) 
{} + + virtual void Bind(Label *label) = 0; + + virtual void Jump(Label *label) = 0; + + virtual ~Assembler() = default; + + AssemblerBuffer *GetBuffer() { + return &buffer_; + } + + protected: + explicit Assembler() : buffer_() {} + + AssemblerBuffer buffer_; +}; + +} // namespace whale + +#endif // WHALE_ASSEMBLER_ASSEMBLER_H_ diff --git a/module/src/main/cpp/whale/src/assembler/label.h b/module/src/main/cpp/whale/src/assembler/label.h new file mode 100644 index 00000000..3e6f978c --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/label.h @@ -0,0 +1,108 @@ +#ifndef WHALE_ASSEMBLER_LABEL_H_ +#define WHALE_ASSEMBLER_LABEL_H_ + +#include +#include "base/logging.h" + +namespace whale { + +class Assembler; + +class AssemblerBuffer; + +class AssemblerFixup; + +namespace x86 { +class X86Assembler; + +class NearLabel; +} // namespace x86 +namespace x86_64 { +class X86_64Assembler; + +class NearLabel; +} // namespace x86_64 + +class ExternalLabel { + public: + ExternalLabel(const char *name_in, uintptr_t address_in) + : name_(name_in), address_(address_in) { + DCHECK(name_in != nullptr); + } + + const char *name() const { return name_; } + + uintptr_t address() const { + return address_; + } + + private: + const char *name_; + const uintptr_t address_; +}; + +class Label { + public: + Label() : position_(0) {} + + Label(Label &&src) + : position_(src.position_) { + // We must unlink/unbind the src label when moving; if not, calling the destructor on + // the src label would fail. + src.position_ = 0; + } + + ~Label() { + // Assert if label is being destroyed with unresolved branches pending. + CHECK(!IsLinked()); + } + + // Returns the position for bound and linked labels. Cannot be used + // for unused labels. + int Position() const { + CHECK(!IsUnused()); + return IsBound() ? 
-position_ - sizeof(void *) : position_ - sizeof(void *); + } + + int LinkPosition() const { + CHECK(IsLinked()); + return position_ - sizeof(void *); + } + + bool IsBound() const { return position_ < 0; } + + bool IsUnused() const { return position_ == 0; } + + bool IsLinked() const { return position_ > 0; } + + private: + + int position_; + + void Reinitialize() { + position_ = 0; + } + + void BindTo(int position) { + position_ = -position - sizeof(void *); + } + + void LinkTo(int position) { + position_ = position + sizeof(void *); + } + + friend class x86::X86Assembler; + + friend class x86::NearLabel; + + friend class x86_64::X86_64Assembler; + + friend class x86_64::NearLabel; + + DISALLOW_COPY_AND_ASSIGN(Label); +}; + + +} // namespace whale + +#endif // WHALE_ASSEMBLER_LABEL_H_ diff --git a/module/src/main/cpp/whale/src/assembler/managed_register.h b/module/src/main/cpp/whale/src/assembler/managed_register.h new file mode 100644 index 00000000..fa404e3e --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/managed_register.h @@ -0,0 +1,44 @@ +#ifndef MYAPPLICATION_MANAGED_REGISTER_H +#define MYAPPLICATION_MANAGED_REGISTER_H + +#include "assembler/value_object.h" + +namespace whale { + +class ManagedRegister : public ValueObject { + public: + // ManagedRegister is a value class. There exists no method to change the + // internal state. We therefore allow a copy constructor and an + // assignment-operator. + constexpr ManagedRegister(const ManagedRegister &other) = default; + + ManagedRegister &operator=(const ManagedRegister &other) = default; + + // It is valid to invoke Equals on and with a NoRegister. 
+ constexpr bool Equals(const ManagedRegister &other) const { + return id_ == other.id_; + } + + constexpr bool IsNoRegister() const { + return id_ == kNoRegister; + } + + static constexpr ManagedRegister NoRegister() { + return ManagedRegister(); + } + + constexpr int RegId() const { return id_; } + + explicit constexpr ManagedRegister(int reg_id) : id_(reg_id) {} + + protected: + static const int kNoRegister = -1; + + constexpr ManagedRegister() : id_(kNoRegister) {} + + int id_; +}; + +} // namespace whale + +#endif //MYAPPLICATION_MANAGED_REGISTER_H diff --git a/module/src/main/cpp/whale/src/assembler/memory_region.cc b/module/src/main/cpp/whale/src/assembler/memory_region.cc new file mode 100644 index 00000000..bd305be0 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/memory_region.cc @@ -0,0 +1,14 @@ +#include +#include "assembler/memory_region.h" + +namespace whale { + +void MemoryRegion::CopyFrom(size_t offset, const MemoryRegion &from) const { + CHECK(from.pointer() != nullptr); + CHECK_GT(from.size(), 0U); + CHECK_GE(this->size(), from.size()); + CHECK_LE(offset, this->size() - from.size()); + memmove(reinterpret_cast(begin() + offset), from.pointer(), from.size()); +} + +} // namespace whale diff --git a/module/src/main/cpp/whale/src/assembler/memory_region.h b/module/src/main/cpp/whale/src/assembler/memory_region.h new file mode 100644 index 00000000..7b1d006c --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/memory_region.h @@ -0,0 +1,145 @@ +#ifndef WHALE_ASSEMBLER_MEMORY_REGION_H_ +#define WHALE_ASSEMBLER_MEMORY_REGION_H_ + +#include +#include +#include "assembler/value_object.h" +#include +#include +#include +#include + +namespace whale { + +class MemoryRegion final : public ValueObject { + public: + struct ContentEquals { + constexpr bool operator()(const MemoryRegion &lhs, const MemoryRegion &rhs) const { + return lhs.size() == rhs.size() && memcmp(lhs.begin(), rhs.begin(), lhs.size()) == 0; + } + }; + + MemoryRegion() : 
pointer_(nullptr), size_(0) {} + + MemoryRegion(void *pointer_in, uintptr_t size_in) : pointer_(pointer_in), size_(size_in) {} + + void *pointer() const { return pointer_; } + + size_t size() const { return size_; } + + size_t size_in_bits() const { return size_ * kBitsPerByte; } + + static size_t pointer_offset() { + return OFFSETOF_MEMBER(MemoryRegion, pointer_); + } + + uint8_t *begin() const { return reinterpret_cast(pointer_); } + + uint8_t *end() const { return begin() + size_; } + + // Load value of type `T` at `offset`. The memory address corresponding + // to `offset` should be word-aligned (on ARM, this is a requirement). + template + ALWAYS_INLINE T Load(uintptr_t offset) const { + T *address = ComputeInternalPointer(offset); + DCHECK(IsWordAligned(address)); + return *address; + } + + // Store `value` (of type `T`) at `offset`. The memory address + // corresponding to `offset` should be word-aligned (on ARM, this is + // a requirement). + template + ALWAYS_INLINE void Store(uintptr_t offset, T value) const { + T *address = ComputeInternalPointer(offset); + DCHECK(IsWordAligned(address)); + *address = value; + } + + // Load value of type `T` at `offset`. The memory address corresponding + // to `offset` does not need to be word-aligned. + template + ALWAYS_INLINE T LoadUnaligned(uintptr_t offset) const { + // Equivalent unsigned integer type corresponding to T. + typedef typename std::make_unsigned::type U; + U equivalent_unsigned_integer_value = 0; + // Read the value byte by byte in a little-endian fashion. + for (size_t i = 0; i < sizeof(U); ++i) { + equivalent_unsigned_integer_value += + *ComputeInternalPointer(offset + i) << (i * kBitsPerByte); + } + return bit_cast(equivalent_unsigned_integer_value); + } + + // Store `value` (of type `T`) at `offset`. The memory address + // corresponding to `offset` does not need to be word-aligned. 
+ template + ALWAYS_INLINE void StoreUnaligned(uintptr_t offset, T value) const { + // Equivalent unsigned integer type corresponding to T. + typedef typename std::make_unsigned::type U; + U equivalent_unsigned_integer_value = bit_cast(value); + // Write the value byte by byte in a little-endian fashion. + for (size_t i = 0; i < sizeof(U); ++i) { + *ComputeInternalPointer(offset + i) = + (equivalent_unsigned_integer_value >> (i * kBitsPerByte)) & 0xFF; + } + } + + template + ALWAYS_INLINE T *PointerTo(uintptr_t offset) const { + return ComputeInternalPointer(offset); + } + + void CopyFrom(size_t offset, const MemoryRegion &from) const; + + template + void CopyFromVector(size_t offset, Vector &vector) const { + if (!vector.empty()) { + CopyFrom(offset, MemoryRegion(vector.data(), vector.size())); + } + } + + // Compute a sub memory region based on an existing one. + ALWAYS_INLINE MemoryRegion Subregion(uintptr_t offset, uintptr_t size_in) const { + CHECK_GE(this->size(), size_in); + CHECK_LE(offset, this->size() - size_in); + return MemoryRegion(reinterpret_cast(begin() + offset), size_in); + } + + // Compute an extended memory region based on an existing one. + ALWAYS_INLINE void Extend(const MemoryRegion ®ion, uintptr_t extra) { + pointer_ = region.pointer(); + size_ = (region.size() + extra); + } + + private: + template + ALWAYS_INLINE T *ComputeInternalPointer(size_t offset) const { + CHECK_GE(size(), sizeof(T)); + CHECK_LE(offset, size() - sizeof(T)); + return reinterpret_cast(begin() + offset); + } + + // Locate the bit with the given offset. Returns a pointer to the byte + // containing the bit, and sets bit_mask to the bit within that byte. 
+ ALWAYS_INLINE uint8_t *ComputeBitPointer(uintptr_t bit_offset, uint8_t *bit_mask) const { + uintptr_t bit_remainder = (bit_offset & (kBitsPerByte - 1)); + *bit_mask = (1U << bit_remainder); + uintptr_t byte_offset = (bit_offset >> kBitsPerByteLog2); + return ComputeInternalPointer(byte_offset); + } + + // Is `address` aligned on a machine word? + template + static constexpr bool IsWordAligned(const T *address) { + // Word alignment in bytes. Determined from pointer size. + return IsAligned(address); + } + + void *pointer_; + size_t size_; +}; + +} // namespace whale + +#endif // WHALE_ASSEMBLER_MEMORY_REGION_H_ diff --git a/module/src/main/cpp/whale/src/assembler/value_object.h b/module/src/main/cpp/whale/src/assembler/value_object.h new file mode 100644 index 00000000..32cfa1ae --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/value_object.h @@ -0,0 +1,15 @@ +#ifndef WHALE_ASSEMBLER_VALUE_OBJECT_H_ +#define WHALE_ASSEMBLER_VALUE_OBJECT_H_ + +#include "base/macros.h" + +namespace whale { + +class ValueObject { + private: + DISALLOW_ALLOCATION(); +}; + +} // namespace whale + +#endif // WHALE_ASSEMBLER_VALUE_OBJECT_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/CMakeLists.txt b/module/src/main/cpp/whale/src/assembler/vixl/CMakeLists.txt new file mode 100644 index 00000000..e7c5a24d --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/CMakeLists.txt @@ -0,0 +1,52 @@ +set(CMAKE_CXX_STANDARD 14) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -fdiagnostics-show-option -Wextra -Wredundant-decls -pedantic -Wwrite-strings -Wunused") + +add_definitions(-DVIXL_CODE_BUFFER_MALLOC) + +set(VIXL_SOURCES + code-buffer-vixl.cc + compiler-intrinsics-vixl.cc + cpu-features.cc + utils-vixl.cc) + +set(VIXL_AARCH32 + aarch32/assembler-aarch32.cc + aarch32/constants-aarch32.cc + aarch32/instructions-aarch32.cc + aarch32/location-aarch32.cc + aarch32/macro-assembler-aarch32.cc + aarch32/operands-aarch32.cc + ) + +set(VIXL_AARCH64 + 
aarch64/assembler-aarch64.cc + aarch64/cpu-aarch64.cc + aarch64/cpu-features-auditor-aarch64.cc + aarch64/decoder-aarch64.cc + aarch64/instructions-aarch64.cc + aarch64/instrument-aarch64.cc + aarch64/logic-aarch64.cc + aarch64/macro-assembler-aarch64.cc + aarch64/operands-aarch64.cc + aarch64/pointer-auth-aarch64.cc + aarch64/simulator-aarch64.cc + ) + + +if (ENABLE_SIMULATOR) + add_definitions(-DVIXL_INCLUDE_SIMULATOR_AARCH64) + set(VIXL_SOURCES ${VIXL_SOURCES} ${VIXL_AARCH32} ${VIXL_AARCH64}) +endif () + +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") + add_definitions("-DVIXL_INCLUDE_TARGET_A32") + add_definitions("-DVIXL_INCLUDE_TARGET_T32") + set(VIXL_SOURCES ${VIXL_SOURCES} ${VIXL_AARCH32}) +elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64") + set(VIXL_SOURCES ${VIXL_SOURCES} ${VIXL_AARCH64}) +endif () + +if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm|aarch64)") + add_library(vixl ${VIXL_SOURCES}) +endif () diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.cc new file mode 100644 index 00000000..5f636981 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.cc @@ -0,0 +1,27923 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +extern "C" { +#include +} + +#include +#include +#include +#include +#include + +#include "utils-vixl.h" +#include "aarch32/assembler-aarch32.h" +#include "aarch32/constants-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/operands-aarch32.h" + +namespace vixl { +namespace aarch32 { + +void Assembler::EmitT32_16(uint16_t instr) { + VIXL_ASSERT(buffer_.Is16bitAligned()); + buffer_.Emit16(instr); +} + + +void Assembler::EmitT32_32(uint32_t instr) { + VIXL_ASSERT(buffer_.Is16bitAligned()); + buffer_.Emit16(static_cast(instr >> 16)); + buffer_.Emit16(static_cast(instr & 0xffff)); +} + + +void Assembler::EmitA32(uint32_t instr) { + VIXL_ASSERT(buffer_.Is32bitAligned()); + buffer_.Emit32(instr); +} + + +#ifdef VIXL_DEBUG +void Assembler::PerformCheckIT(Condition condition) { + if (it_mask_ == 0) { + VIXL_ASSERT(IsUsingA32() || condition.Is(al)); + } else { + VIXL_ASSERT(condition.Is(first_condition_)); + // For A32, AdavanceIT() is not called by the assembler. 
We must call it + // in order to check that IT instructions are used consistently with + // the following conditional instructions. + if (IsUsingA32()) AdvanceIT(); + } +} +#endif + + +void Assembler::BindHelper(Label* label) { + VIXL_ASSERT(!label->IsBound()); + label->SetLocation(this, GetCursorOffset()); + label->MarkBound(); +} + +uint32_t Assembler::Link(uint32_t instr, + Location* location, + const Location::EmitOperator& op, + const ReferenceInfo* info) { + location->SetReferenced(); + if (location->IsBound()) { + return op.Encode(instr, GetCursorOffset(), location); + } + location->AddForwardRef(GetCursorOffset(), op, info); + return instr; +} + + +// Start of generated code. +class Dt_L_imm6_1 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_L_imm6_1(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_L_imm6_1::Dt_L_imm6_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x4); + break; + case S64: + type_ = 0x0; + SetEncodingValue(0x8); + break; + case U64: + type_ = 0x1; + SetEncodingValue(0x8); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_L_imm6_2 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_L_imm6_2(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_L_imm6_2::Dt_L_imm6_2(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S16: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S32: + type_ = 0x1; + SetEncodingValue(0x4); + break; + case S64: + type_ = 0x1; + SetEncodingValue(0x8); + break; + default: + 
VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_L_imm6_3 : public EncodingValue { + public: + explicit Dt_L_imm6_3(DataType dt); +}; + +Dt_L_imm6_3::Dt_L_imm6_3(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x1); + break; + case I16: + SetEncodingValue(0x2); + break; + case I32: + SetEncodingValue(0x4); + break; + case I64: + SetEncodingValue(0x8); + break; + default: + break; + } +} + +class Dt_L_imm6_4 : public EncodingValue { + public: + explicit Dt_L_imm6_4(DataType dt); +}; + +Dt_L_imm6_4::Dt_L_imm6_4(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x1); + break; + case Untyped16: + SetEncodingValue(0x2); + break; + case Untyped32: + SetEncodingValue(0x4); + break; + case Untyped64: + SetEncodingValue(0x8); + break; + default: + break; + } +} + +class Dt_imm6_1 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_1(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_1::Dt_imm6_1(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S64: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U64: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_imm6_2 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_2(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_2::Dt_imm6_2(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S64: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_imm6_3 
: public EncodingValue { + public: + explicit Dt_imm6_3(DataType dt); +}; + +Dt_imm6_3::Dt_imm6_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case I64: + SetEncodingValue(0x4); + break; + default: + break; + } +} + +class Dt_imm6_4 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_imm6_4(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_imm6_4::Dt_imm6_4(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x2); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x4); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x4); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_op_U_size_1 : public EncodingValue { + public: + explicit Dt_op_U_size_1(DataType dt); +}; + +Dt_op_U_size_1::Dt_op_U_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + case P8: + SetEncodingValue(0x8); + break; + case P64: + SetEncodingValue(0xa); + break; + default: + break; + } +} + +class Dt_op_size_1 : public EncodingValue { + public: + explicit Dt_op_size_1(DataType dt); +}; + +Dt_op_size_1::Dt_op_size_1(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case P8: + SetEncodingValue(0x4); + break; + default: + break; + } +} + +class Dt_op_size_2 : public EncodingValue { + public: + explicit 
Dt_op_size_2(DataType dt); +}; + +Dt_op_size_2::Dt_op_size_2(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_op_size_3 : public EncodingValue { + public: + explicit Dt_op_size_3(DataType dt); +}; + +Dt_op_size_3::Dt_op_size_3(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case S64: + SetEncodingValue(0x2); + break; + case U16: + SetEncodingValue(0x4); + break; + case U32: + SetEncodingValue(0x5); + break; + case U64: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_imm3H_1 : public EncodingValue { + public: + explicit Dt_U_imm3H_1(DataType dt); +}; + +Dt_U_imm3H_1::Dt_U_imm3H_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x1); + break; + case S16: + SetEncodingValue(0x2); + break; + case S32: + SetEncodingValue(0x4); + break; + case U8: + SetEncodingValue(0x9); + break; + case U16: + SetEncodingValue(0xa); + break; + case U32: + SetEncodingValue(0xc); + break; + default: + break; + } +} + +class Dt_U_opc1_opc2_1 : public EncodingValue { + public: + explicit Dt_U_opc1_opc2_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_U_opc1_opc2_1::Dt_U_opc1_opc2_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case S8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x8 | lane.GetLane()); + break; + case S16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case U8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x18 | lane.GetLane()); + break; + case U16: + if 
((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x11 | (lane.GetLane() << 1)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + case kDataTypeValueNone: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + default: + break; + } +} + +class Dt_opc1_opc2_1 : public EncodingValue { + public: + explicit Dt_opc1_opc2_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_opc1_opc2_1::Dt_opc1_opc2_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case Untyped8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x8 | lane.GetLane()); + break; + case Untyped16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + case kDataTypeValueNone: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x0 | (lane.GetLane() << 2)); + break; + default: + break; + } +} + +class Dt_imm4_1 : public EncodingValue { + public: + explicit Dt_imm4_1(DataType dt, const DRegisterLane& lane); +}; + +Dt_imm4_1::Dt_imm4_1(DataType dt, const DRegisterLane& lane) { + switch (dt.GetValue()) { + case Untyped8: + if ((lane.GetLane() & 7) != lane.GetLane()) { + return; + } + SetEncodingValue(0x1 | (lane.GetLane() << 1)); + break; + case Untyped16: + if ((lane.GetLane() & 3) != lane.GetLane()) { + return; + } + SetEncodingValue(0x2 | (lane.GetLane() << 2)); + break; + case Untyped32: + if ((lane.GetLane() & 1) != lane.GetLane()) { + return; + } + SetEncodingValue(0x4 | (lane.GetLane() << 3)); + break; + default: + break; + } +} + +class Dt_B_E_1 : public EncodingValue { + public: + explicit Dt_B_E_1(DataType dt); +}; + 
+Dt_B_E_1::Dt_B_E_1(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x2); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_op_1 : public EncodingValue { + public: + Dt_op_1(DataType dt1, DataType dt2); +}; + +Dt_op_1::Dt_op_1(DataType dt1, DataType dt2) { + if ((dt1.GetValue() == F32) && (dt2.GetValue() == S32)) { + SetEncodingValue(0x0); + return; + } + if ((dt1.GetValue() == F32) && (dt2.GetValue() == U32)) { + SetEncodingValue(0x1); + return; + } + if ((dt1.GetValue() == S32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x2); + return; + } + if ((dt1.GetValue() == U32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x3); + return; + } +} + +class Dt_op_2 : public EncodingValue { + public: + explicit Dt_op_2(DataType dt); +}; + +Dt_op_2::Dt_op_2(DataType dt) { + switch (dt.GetValue()) { + case U32: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_op_3 : public EncodingValue { + public: + explicit Dt_op_3(DataType dt); +}; + +Dt_op_3::Dt_op_3(DataType dt) { + switch (dt.GetValue()) { + case S32: + SetEncodingValue(0x0); + break; + case U32: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_U_sx_1 : public EncodingValue { + public: + explicit Dt_U_sx_1(DataType dt); +}; + +Dt_U_sx_1::Dt_U_sx_1(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case U16: + SetEncodingValue(0x2); + break; + case U32: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_op_U_1 : public EncodingValue { + public: + Dt_op_U_1(DataType dt1, DataType dt2); +}; + +Dt_op_U_1::Dt_op_U_1(DataType dt1, DataType dt2) { + if ((dt1.GetValue() == F32) && (dt2.GetValue() == S32)) { + SetEncodingValue(0x0); + return; + } + if ((dt1.GetValue() == F32) && (dt2.GetValue() 
== U32)) { + SetEncodingValue(0x1); + return; + } + if ((dt1.GetValue() == S32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x2); + return; + } + if ((dt1.GetValue() == U32) && (dt2.GetValue() == F32)) { + SetEncodingValue(0x3); + return; + } +} + +class Dt_sz_1 : public EncodingValue { + public: + explicit Dt_sz_1(DataType dt); +}; + +Dt_sz_1::Dt_sz_1(DataType dt) { + switch (dt.GetValue()) { + case F32: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_F_size_1 : public EncodingValue { + public: + explicit Dt_F_size_1(DataType dt); +}; + +Dt_F_size_1::Dt_F_size_1(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_2 : public EncodingValue { + public: + explicit Dt_F_size_2(DataType dt); +}; + +Dt_F_size_2::Dt_F_size_2(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_3 : public EncodingValue { + public: + explicit Dt_F_size_3(DataType dt); +}; + +Dt_F_size_3::Dt_F_size_3(DataType dt) { + switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_F_size_4 : public EncodingValue { + public: + explicit Dt_F_size_4(DataType dt); +}; + +Dt_F_size_4::Dt_F_size_4(DataType dt) { + switch (dt.GetValue()) { + case U32: + SetEncodingValue(0x2); + break; + case F32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_1 : public EncodingValue { + public: + explicit Dt_U_size_1(DataType dt); +}; + +Dt_U_size_1::Dt_U_size_1(DataType dt) { + switch 
(dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_2 : public EncodingValue { + public: + explicit Dt_U_size_2(DataType dt); +}; + +Dt_U_size_2::Dt_U_size_2(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + default: + break; + } +} + +class Dt_U_size_3 : public EncodingValue { + public: + explicit Dt_U_size_3(DataType dt); +}; + +Dt_U_size_3::Dt_U_size_3(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + case S64: + SetEncodingValue(0x3); + break; + case U8: + SetEncodingValue(0x4); + break; + case U16: + SetEncodingValue(0x5); + break; + case U32: + SetEncodingValue(0x6); + break; + case U64: + SetEncodingValue(0x7); + break; + default: + break; + } +} + +class Dt_size_1 : public EncodingValue { + public: + explicit Dt_size_1(DataType dt); +}; + +Dt_size_1::Dt_size_1(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Dt_size_2 : public EncodingValue { + public: + explicit Dt_size_2(DataType dt); +}; + +Dt_size_2::Dt_size_2(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + case I64: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_size_3 : public EncodingValue { + public: + explicit Dt_size_3(DataType dt); +}; + +Dt_size_3::Dt_size_3(DataType dt) { + 
switch (dt.GetValue()) { + case I16: + SetEncodingValue(0x0); + break; + case I32: + SetEncodingValue(0x1); + break; + case I64: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_4 : public EncodingValue { + public: + explicit Dt_size_4(DataType dt); +}; + +Dt_size_4::Dt_size_4(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_5 : public EncodingValue { + public: + explicit Dt_size_5(DataType dt); +}; + +Dt_size_5::Dt_size_5(DataType dt) { + switch (dt.GetValue()) { + case S8: + SetEncodingValue(0x0); + break; + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_6 : public EncodingValue { + public: + explicit Dt_size_6(DataType dt); +}; + +Dt_size_6::Dt_size_6(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x2); + break; + case Untyped64: + SetEncodingValue(0x3); + break; + default: + break; + } +} + +class Dt_size_7 : public EncodingValue { + public: + explicit Dt_size_7(DataType dt); +}; + +Dt_size_7::Dt_size_7(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_8 : public EncodingValue { + public: + Dt_size_8(DataType dt, Alignment align); +}; + +Dt_size_8::Dt_size_8(DataType dt, Alignment align) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + case Untyped32: + if (align.Is(k64BitAlign) || align.Is(kNoAlignment)) { + SetEncodingValue(0x2); + } else if (align.Is(k128BitAlign)) { + 
SetEncodingValue(0x3); + } + break; + default: + break; + } +} + +class Dt_size_9 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_9(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_9::Dt_size_9(DataType dt) { + switch (dt.GetValue()) { + case I16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case I32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case F32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_10 : public EncodingValue { + public: + explicit Dt_size_10(DataType dt); +}; + +Dt_size_10::Dt_size_10(DataType dt) { + switch (dt.GetValue()) { + case S8: + case U8: + case I8: + SetEncodingValue(0x0); + break; + case S16: + case U16: + case I16: + SetEncodingValue(0x1); + break; + case S32: + case U32: + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_11 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_11(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_11::Dt_size_11(DataType dt) { + switch (dt.GetValue()) { + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_12 : public EncodingValue { + uint32_t type_; + + public: + explicit Dt_size_12(DataType dt); + uint32_t GetTypeEncodingValue() const { return type_; } +}; + +Dt_size_12::Dt_size_12(DataType dt) { + switch (dt.GetValue()) { + case S8: + type_ = 0x0; + SetEncodingValue(0x0); + break; + case U8: + type_ = 0x1; + SetEncodingValue(0x0); + break; + case S16: + type_ = 0x0; + SetEncodingValue(0x1); + break; + case U16: + type_ = 0x1; + SetEncodingValue(0x1); + break; + case S32: + type_ = 
0x0; + SetEncodingValue(0x2); + break; + case U32: + type_ = 0x1; + SetEncodingValue(0x2); + break; + default: + VIXL_UNREACHABLE(); + type_ = 0x0; + break; + } +} + +class Dt_size_13 : public EncodingValue { + public: + explicit Dt_size_13(DataType dt); +}; + +Dt_size_13::Dt_size_13(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x1); + break; + case S32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_14 : public EncodingValue { + public: + explicit Dt_size_14(DataType dt); +}; + +Dt_size_14::Dt_size_14(DataType dt) { + switch (dt.GetValue()) { + case S16: + SetEncodingValue(0x0); + break; + case S32: + SetEncodingValue(0x1); + break; + case S64: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_15 : public EncodingValue { + public: + explicit Dt_size_15(DataType dt); +}; + +Dt_size_15::Dt_size_15(DataType dt) { + switch (dt.GetValue()) { + case Untyped8: + SetEncodingValue(0x0); + break; + case Untyped16: + SetEncodingValue(0x1); + break; + default: + break; + } +} + +class Dt_size_16 : public EncodingValue { + public: + explicit Dt_size_16(DataType dt); +}; + +Dt_size_16::Dt_size_16(DataType dt) { + switch (dt.GetValue()) { + case F32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Dt_size_17 : public EncodingValue { + public: + explicit Dt_size_17(DataType dt); +}; + +Dt_size_17::Dt_size_17(DataType dt) { + switch (dt.GetValue()) { + case I8: + SetEncodingValue(0x0); + break; + case I16: + SetEncodingValue(0x1); + break; + case I32: + SetEncodingValue(0x2); + break; + default: + break; + } +} + +class Index_1 : public EncodingValue { + public: + Index_1(const NeonRegisterList& nreglist, DataType dt); +}; + +Index_1::Index_1(const NeonRegisterList& nreglist, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 1; + 
if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + uint32_t value = nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_1 : public EncodingValue { + public: + Align_index_align_1(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_1::Align_index_align_1(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k16BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 3; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_2 : public EncodingValue { + public: + Align_index_align_2(Alignment 
align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_2::Align_index_align_2(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == k16BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_index_align_3 : public EncodingValue { + public: + Align_index_align_3(Alignment align, + const NeonRegisterList& nreglist, + DataType dt); +}; + +Align_index_align_3::Align_index_align_3(Alignment align, + const NeonRegisterList& nreglist, + DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + uint32_t value; + if (align.GetType() == k32BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 7) != nreglist.GetTransferLane()) { + return; + } + value |= 
nreglist.GetTransferLane() << 1; + if (!nreglist.IsSingleSpaced()) return; + SetEncodingValue(value); + break; + } + case Untyped16: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 3) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 2; + if (nreglist.IsDoubleSpaced()) value |= 2; + SetEncodingValue(value); + break; + } + case Untyped32: { + uint32_t value; + if (align.GetType() == k64BitAlign) { + value = 1; + } else if (align.GetType() == k128BitAlign) { + value = 2; + } else if (align.GetType() == kNoAlignment) { + value = 0; + } else { + return; + } + if ((nreglist.GetTransferLane() & 1) != nreglist.GetTransferLane()) { + return; + } + value |= nreglist.GetTransferLane() << 3; + if (nreglist.IsDoubleSpaced()) value |= 4; + SetEncodingValue(value); + break; + } + default: + break; + } +} + +class Align_a_1 : public EncodingValue { + public: + Align_a_1(Alignment align, DataType dt); +}; + +Align_a_1::Align_a_1(Alignment align, DataType dt) { + switch (align.GetType()) { + case k16BitAlign: + if (dt.Is(Untyped16)) SetEncodingValue(0x1); + break; + case k32BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_a_2 : public EncodingValue { + public: + Align_a_2(Alignment align, DataType dt); +}; + +Align_a_2::Align_a_2(Alignment align, DataType dt) { + switch (align.GetType()) { + case k16BitAlign: + if (dt.Is(Untyped8)) SetEncodingValue(0x1); + break; + case k32BitAlign: + if (dt.Is(Untyped16)) SetEncodingValue(0x1); + break; + case k64BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_a_3 : public EncodingValue { + public: + Align_a_3(Alignment align, DataType dt); 
+}; + +Align_a_3::Align_a_3(Alignment align, DataType dt) { + switch (align.GetType()) { + case k32BitAlign: + if (dt.Is(Untyped8)) SetEncodingValue(0x1); + break; + case k64BitAlign: + if (dt.Is(Untyped16)) + SetEncodingValue(0x1); + else if (dt.Is(Untyped32)) + SetEncodingValue(0x1); + break; + case k128BitAlign: + if (dt.Is(Untyped32)) SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_1 : public EncodingValue { + public: + Align_align_1(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_1::Align_align_1(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x2); + break; + case k256BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_2 : public EncodingValue { + public: + Align_align_2(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_2::Align_align_2(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + SetEncodingValue(0x2); + break; + case k256BitAlign: + if ((nreglist.GetLength() == 4)) SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_3 : public EncodingValue { + public: + explicit Align_align_3(Alignment align); +}; + +Align_align_3::Align_align_3(Alignment align) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_4 : public EncodingValue { + public: + explicit 
Align_align_4(Alignment align); +}; + +Align_align_4::Align_align_4(Alignment align) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + SetEncodingValue(0x2); + break; + case k256BitAlign: + SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + +class Align_align_5 : public EncodingValue { + public: + Align_align_5(Alignment align, const NeonRegisterList& nreglist); +}; + +Align_align_5::Align_align_5(Alignment align, + const NeonRegisterList& nreglist) { + switch (align.GetType()) { + case k64BitAlign: + SetEncodingValue(0x1); + break; + case k128BitAlign: + if ((nreglist.GetLength() == 2) || (nreglist.GetLength() == 4)) + SetEncodingValue(0x2); + break; + case k256BitAlign: + if ((nreglist.GetLength() == 4)) SetEncodingValue(0x3); + break; + case kNoAlignment: + SetEncodingValue(0x0); + break; + default: + break; + } +} + + +// CBNZ{} ,

,
,
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000710U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABA{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000710U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaba, &Assembler::vaba, cond, dt, rd, rn, rm); +} + +void Assembler::vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABA{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000750U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABA{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000750U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaba, &Assembler::vaba, cond, dt, rd, rn, rm); +} + +void Assembler::vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABAL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABAL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabal, &Assembler::vabal, cond, dt, rd, rn, rm); +} + +void Assembler::vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABD{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VABD{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABD{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VABD{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabd, &Assembler::vabd, cond, dt, rd, rn, rm); +} + +void Assembler::vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABD{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VABD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000740U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABD{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VABD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000740U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabd, &Assembler::vabd, cond, dt, rd, rn, rm); +} + +void Assembler::vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABDL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABDL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800700U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabdl, &Assembler::vabdl, cond, dt, rd, rn, rm); +} + +void Assembler::vabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10300U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VABS{}{}.F64
, ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xeeb00bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b10300U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VABS{}{}.F64
, ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb00bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVabs, &Assembler::vabs, cond, dt, rd, rm); +} + +void Assembler::vabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VABS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10340U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b10340U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVabs, &Assembler::vabs, cond, dt, rd, rm); +} + +void Assembler::vabs(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VABS{}{}.F32 , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeeb00ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VABS{}{}.F32 , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb00ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVabs, &Assembler::vabs, cond, dt, rd, rm); +} + +void Assembler::vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGE{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGE{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacge, &Assembler::vacge, cond, dt, rd, rn, rm); +} + +void Assembler::vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGE{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGE{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacge, &Assembler::vacge, cond, dt, rd, rn, rm); +} + +void Assembler::vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGT{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGT{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacgt, &Assembler::vacgt, cond, dt, rd, rn, rm); +} + +void Assembler::vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACGT{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACGT{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacgt, &Assembler::vacgt, cond, dt, rd, rn, rm); +} + +void Assembler::vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLE{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLE{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacle, &Assembler::vacle, cond, dt, rd, rn, rm); +} + +void Assembler::vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLE{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLE{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVacle, &Assembler::vacle, cond, dt, rd, rn, rm); +} + +void Assembler::vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLT{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLT{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaclt, &Assembler::vaclt, cond, dt, rd, rn, rm); +} + +void Assembler::vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VACLT{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VACLT{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaclt, &Assembler::vaclt, cond, dt, rd, rn, rm); +} + +void Assembler::vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VADD{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VADD{}{}.F64 {
}, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xee300b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VADD{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000800U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADD{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VADD{}{}.F64 {
}, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e300b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + // VADD{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000800U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVadd, &Assembler::vadd, cond, dt, rd, rn, rm); +} + +void Assembler::vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VADD{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VADD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000840U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADD{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VADD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000840U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVadd, &Assembler::vadd, cond, dt, rd, rn, rm); +} + +void Assembler::vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VADD{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee300a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VADD{}{}.F32 {}, , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e300a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVadd, &Assembler::vadd, cond, dt, rd, rn, rm); +} + +void Assembler::vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VADDHN{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADDHN{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al)) { + EmitA32(0xf2800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaddhn, &Assembler::vaddhn, cond, dt, rd, rn, rm); +} + +void Assembler::vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VADDL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADDL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaddl, &Assembler::vaddl, cond, dt, rd, rn, rm); +} + +void Assembler::vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VADDW{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VADDW{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVaddw, &Assembler::vaddw, cond, dt, rd, rn, rm); +} + +void Assembler::vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVand encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VAND{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VAND{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VAND{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VAND{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVand, &Assembler::vand, cond, dt, rd, rn, operand); +} + +void Assembler::vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVand encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VAND{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VAND{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VAND{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VAND{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVand, &Assembler::vand, cond, dt, rd, rn, operand); +} + +void Assembler::vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVbic encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VBIC{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VBIC{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VBIC{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIC{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVbic, &Assembler::vbic, cond, dt, rd, rn, operand); +} + +void Assembler::vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVbic encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VBIC{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VBIC{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VBIC{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIC{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVbic, &Assembler::vbic, cond, dt, rd, rn, operand); +} + +void Assembler::vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIF{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff300110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIF{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3300110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbif, &Assembler::vbif, cond, dt, rd, rn, rm); +} + +void Assembler::vbif( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIF{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIF{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbif, &Assembler::vbif, cond, dt, rd, rn, rm); +} + +void Assembler::vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIT{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIT{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3200110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbit, &Assembler::vbit, cond, dt, rd, rn, rm); +} + +void Assembler::vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBIT{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBIT{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbit, &Assembler::vbit, cond, dt, rd, rn, rm); +} + +void Assembler::vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBSL{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBSL{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3100110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbsl, &Assembler::vbsl, cond, dt, rd, rn, rm); +} + +void Assembler::vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VBSL{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VBSL{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3100150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVbsl, &Assembler::vbsl, cond, dt, rd, rn, rm); +} + +void Assembler::vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
{
}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10100U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{}{}.
{
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10100U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rm, operand); +} + +void Assembler::vceq(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
{}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10140U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{}{}.
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10140U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rm, operand); +} + +void Assembler::vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + Dt_sz_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000810U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCEQ{}{}.
{
}, , ; T2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000e00U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000810U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCEQ{}{}.
{
}, , ; A2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000e00U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rn, rm); +} + +void Assembler::vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + Dt_sz_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VCEQ{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000850U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCEQ{}{}.
{}, , ; T2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000e40U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCEQ{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000850U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCEQ{}{}.
{}, , ; A2 + if (encoded_dt_2.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000e40U | (encoded_dt_2.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVceq, &Assembler::vceq, cond, dt, rd, rn, rm); +} + +void Assembler::vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
{
}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10080U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{}{}.
{
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10080U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rm, operand); +} + +void Assembler::vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
{}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb100c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{}{}.
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b100c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rm, operand); +} + +void Assembler::vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGE{}{}.F32 {
}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGE{}{}.F32 {
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rn, rm); +} + +void Assembler::vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGE{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGE{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGE{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGE{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcge, &Assembler::vcge, cond, dt, rd, rn, rm); +} + +void Assembler::vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
{
}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10000U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{}{}.
{
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10000U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rm, operand); +} + +void Assembler::vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
{}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10040U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{}{}.
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10040U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rm, operand); +} + +void Assembler::vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGT{}{}.F32 {
}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGT{}{}.F32 {
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rn, rm); +} + +void Assembler::vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCGT{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VCGT{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCGT{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + // VCGT{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcgt, &Assembler::vcgt, cond, dt, rd, rn, rm); +} + +void Assembler::vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
{
}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10180U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{}{}.
{
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10180U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rm, operand); +} + +void Assembler::vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
{}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb101c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{}{}.
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b101c0U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rm, operand); +} + +void Assembler::vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLE{}{}.F32 {
}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000310U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLE{}{}.F32 {
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rn, rm); +} + +void Assembler::vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLE{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLE{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLE{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000350U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLE{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVcle, &Assembler::vcle, cond, dt, rd, rn, rm); +} + +void Assembler::vcls(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VCLS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcls, &Assembler::vcls, cond, dt, rd, rm); +} + +void Assembler::vcls(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VCLS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcls, &Assembler::vcls, cond, dt, rd, rm); +} + +void Assembler::vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
{
}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10200U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{}{}.
{
}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10200U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rm, operand); +} + +void Assembler::vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
{}, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10240U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{}{}.
{}, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b10240U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rm, operand); +} + +void Assembler::vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLT{}{}.F32 {
}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLT{}{}.F32 {
}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e00U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rn, rm); +} + +void Assembler::vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VCLT{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + // VCLT{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VCLT{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000340U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(5, 0) | rm.Encode(7, 16)); + return; + } + } + // VCLT{}{}.F32 {}, , ; A2 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200e40U | rd.Encode(22, 12) | rn.Encode(5, 0) | + rm.Encode(7, 16)); + return; + } + } + } + Delegate(kVclt, &Assembler::vclt, cond, dt, rd, rn, rm); +} + +void Assembler::vclz(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VCLZ{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLZ{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVclz, &Assembler::vclz, cond, dt, rd, rm); +} + +void Assembler::vclz(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VCLZ{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb004c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCLZ{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b004c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVclz, &Assembler::vclz, cond, dt, rd, rm); +} + +void Assembler::vcmp(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + SRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMP{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb40a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMP{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb40a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMP{}{}.F32 , #0.0 ; T2 + if (dt.Is(F32) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50a40U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMP{}{}.F32 , #0.0 ; A2 + if (dt.Is(F32) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmp, &Assembler::vcmp, cond, dt, rd, operand); +} + +void Assembler::vcmp(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMP{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb40b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMP{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb40b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMP{}{}.F64
, #0.0 ; T2 + if (dt.Is(F64) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50b40U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMP{}{}.F64
, #0.0 ; A2 + if (dt.Is(F64) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmp, &Assembler::vcmp, cond, dt, rd, operand); +} + +void Assembler::vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + SRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMPE{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb40ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMPE{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb40ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMPE{}{}.F32 , #0.0 ; T2 + if (dt.Is(F32) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50ac0U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMPE{}{}.F32 , #0.0 ; A2 + if (dt.Is(F32) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmpe, &Assembler::vcmpe, cond, dt, rd, operand); +} + +void Assembler::vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VCMPE{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb40bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCMPE{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb40bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + if (IsUsingT32()) { + // VCMPE{}{}.F64
, #0.0 ; T2 + if (dt.Is(F64) && (operand.IsFloatZero())) { + EmitT32_32(0xeeb50bc0U | rd.Encode(22, 12)); + AdvanceIT(); + return; + } + } else { + // VCMPE{}{}.F64
, #0.0 ; A2 + if (dt.Is(F64) && (operand.IsFloatZero()) && cond.IsNotNever()) { + EmitA32(0x0eb50bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12)); + return; + } + } + } + Delegate(kVcmpe, &Assembler::vcmpe, cond, dt, rd, operand); +} + +void Assembler::vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCNT{}{}.8
, ; T1 + if (dt.Is(Untyped8)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00500U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCNT{}{}.8
, ; A1 + if (dt.Is(Untyped8)) { + if (cond.Is(al)) { + EmitA32(0xf3b00500U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcnt, &Assembler::vcnt, cond, dt, rd, rm); +} + +void Assembler::vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCNT{}{}.8 , ; T1 + if (dt.Is(Untyped8)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00540U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCNT{}{}.8 , ; A1 + if (dt.Is(Untyped8)) { + if (cond.Is(al)) { + EmitA32(0xf3b00540U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcnt, &Assembler::vcnt, cond, dt, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_2 encoded_dt(dt2); + if (IsUsingT32()) { + // VCVT{}{}.F64.F32
, ; T1 + if (dt1.Is(F64) && dt2.Is(F32)) { + EmitT32_32(0xeeb70ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.F64.
, ; T1 + if (dt1.Is(F64) && encoded_dt.IsValid()) { + EmitT32_32(0xeeb80b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.F64.F32
, ; A1 + if (dt1.Is(F64) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb70ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.F64.
, ; A1 + if (dt1.Is(F64) && encoded_dt.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb80b40U | (cond.GetCondition() << 28) | + (encoded_dt.GetEncodingValue() << 7) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVT{}{}.F32.F64 , ; T1 + if (dt1.Is(F32) && dt2.Is(F64)) { + EmitT32_32(0xeeb70bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.U32.F64 , ; T1 + if (dt1.Is(U32) && dt2.Is(F64)) { + EmitT32_32(0xeebc0bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.S32.F64 , ; T1 + if (dt1.Is(S32) && dt2.Is(F64)) { + EmitT32_32(0xeebd0bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.F32.F64 , ; A1 + if (dt1.Is(F32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb70bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.U32.F64 , ; A1 + if (dt1.Is(U32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ebc0bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.S32.F64 , ; A1 + if (dt1.Is(S32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ebd0bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_U_1 encoded_dt(dt1, dt2); + Dt_U_sx_1 encoded_dt_2(dt2); + Dt_U_sx_1 encoded_dt_3(dt1); + if (IsUsingT32()) { + // VCVT{}{}.
.
, , # ; T1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t fbits_ = 64 - fbits; + EmitT32_32(0xef800e10U | ((encoded_dt.GetEncodingValue() & 0x1) << 28) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + AdvanceIT(); + return; + } + } + // VCVT{}{}.F64.
, , # ; T1 + if (dt1.Is(F64) && encoded_dt_2.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeeba0b40U | ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + // VCVT{}{}.
.F64 , , # ; T1 + if (encoded_dt_3.IsValid() && dt2.Is(F64) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeebe0b40U | ((encoded_dt_3.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_3.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.
.
, , # ; A1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al)) { + uint32_t fbits_ = 64 - fbits; + EmitA32(0xf2800e10U | ((encoded_dt.GetEncodingValue() & 0x1) << 24) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + return; + } + } + // VCVT{}{}.F64.
, , # ; A1 + if (dt1.Is(F64) && encoded_dt_2.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0eba0b40U | (cond.GetCondition() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + // VCVT{}{}.
.F64 , , # ; A1 + if (encoded_dt_3.IsValid() && dt2.Is(F64) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0ebe0b40U | (cond.GetCondition() << 28) | + ((encoded_dt_3.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_3.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm, fbits); +} + +void Assembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_U_1 encoded_dt(dt1, dt2); + if (IsUsingT32()) { + // VCVT{}{}.
.
, , # ; T1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t fbits_ = 64 - fbits; + EmitT32_32(0xef800e50U | ((encoded_dt.GetEncodingValue() & 0x1) << 28) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + AdvanceIT(); + return; + } + } + } else { + // VCVT{}{}.
.
, , # ; A1 + if (encoded_dt.IsValid() && (fbits >= 1) && (fbits <= 32)) { + if (cond.Is(al)) { + uint32_t fbits_ = 64 - fbits; + EmitA32(0xf2800e50U | ((encoded_dt.GetEncodingValue() & 0x1) << 24) | + ((encoded_dt.GetEncodingValue() & 0x2) << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (fbits_ << 16)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm, fbits); +} + +void Assembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_sx_1 encoded_dt(dt2); + Dt_U_sx_1 encoded_dt_2(dt1); + if (IsUsingT32()) { + // VCVT{}{}.F32.
, , # ; T1 + if (dt1.Is(F32) && encoded_dt.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeeba0a40U | ((encoded_dt.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + // VCVT{}{}.
.F32 , , # ; T1 + if (encoded_dt_2.IsValid() && dt2.Is(F32) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32)))) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitT32_32(0xeebe0a40U | ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.F32.
, , # ; A1 + if (dt1.Is(F32) && encoded_dt.IsValid() && rd.Is(rm) && + (((dt2.Is(S16) || dt2.Is(U16)) && (fbits <= 16)) || + ((dt2.Is(S32) || dt2.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt2.Is(S16) || dt2.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0eba0a40U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + // VCVT{}{}.
.F32 , , # ; A1 + if (encoded_dt_2.IsValid() && dt2.Is(F32) && rd.Is(rm) && + (((dt1.Is(S16) || dt1.Is(U16)) && (fbits <= 16)) || + ((dt1.Is(S32) || dt1.Is(U32)) && (fbits >= 1) && (fbits <= 32))) && + cond.IsNotNever()) { + unsigned offset = 32; + if (dt1.Is(S16) || dt1.Is(U16)) { + offset = 16; + } + uint32_t fbits_ = offset - fbits; + EmitA32(0x0ebe0a40U | (cond.GetCondition() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x1) << 7) | + ((encoded_dt_2.GetEncodingValue() & 0x2) << 15) | + rd.Encode(22, 12) | ((fbits_ & 0x1) << 5) | + ((fbits_ & 0x1e) >> 1)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm, fbits); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_1 encoded_dt(dt1, dt2); + if (IsUsingT32()) { + // VCVT{}{}.
.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffbb0600U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCVT{}{}.
.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3bb0600U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_1 encoded_dt(dt1, dt2); + if (IsUsingT32()) { + // VCVT{}{}.
.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffbb0640U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCVT{}{}.
.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3bb0640U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVT{}{}.F16.F32
, ; T1 + if (dt1.Is(F16) && dt2.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb60600U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCVT{}{}.F16.F32
, ; A1 + if (dt1.Is(F16) && dt2.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3b60600U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVT{}{}.F32.F16 , ; T1 + if (dt1.Is(F32) && dt2.Is(F16)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb60700U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VCVT{}{}.F32.F16 , ; A1 + if (dt1.Is(F32) && dt2.Is(F16)) { + if (cond.Is(al)) { + EmitA32(0xf3b60700U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_2 encoded_dt(dt2); + if (IsUsingT32()) { + // VCVT{}{}.U32.F32 , ; T1 + if (dt1.Is(U32) && dt2.Is(F32)) { + EmitT32_32(0xeebc0ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.S32.F32 , ; T1 + if (dt1.Is(S32) && dt2.Is(F32)) { + EmitT32_32(0xeebd0ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVT{}{}.F32.
, ; T1 + if (dt1.Is(F32) && encoded_dt.IsValid()) { + EmitT32_32(0xeeb80a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVT{}{}.U32.F32 , ; A1 + if (dt1.Is(U32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebc0ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.S32.F32 , ; A1 + if (dt1.Is(S32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebd0ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVT{}{}.F32.
, ; A1 + if (dt1.Is(F32) && encoded_dt.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb80a40U | (cond.GetCondition() << 28) | + (encoded_dt.GetEncodingValue() << 7) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvt, &Assembler::vcvt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTA{}.
.F32
, ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0000U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTA{}.
.F32
, ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0000U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm); +} + +void Assembler::vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTA{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0040U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTA{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0040U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm); +} + +void Assembler::vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTA{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebc0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTA{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebc0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm); +} + +void Assembler::vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTA{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebc0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTA{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebc0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvta, &Assembler::vcvta, dt1, dt2, rd, rm); +} + +void Assembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTB{}{}.F32.F16 , ; T1 + if (dt1.Is(F32) && dt2.Is(F16)) { + EmitT32_32(0xeeb20a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTB{}{}.F16.F32 , ; T1 + if (dt1.Is(F16) && dt2.Is(F32)) { + EmitT32_32(0xeeb30a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTB{}{}.F32.F16 , ; A1 + if (dt1.Is(F32) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTB{}{}.F16.F32 , ; A1 + if (dt1.Is(F16) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb30a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtb, &Assembler::vcvtb, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTB{}{}.F64.F16
, ; T1 + if (dt1.Is(F64) && dt2.Is(F16)) { + EmitT32_32(0xeeb20b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTB{}{}.F64.F16
, ; A1 + if (dt1.Is(F64) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtb, &Assembler::vcvtb, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTB{}{}.F16.F64 , ; T1 + if (dt1.Is(F16) && dt2.Is(F64)) { + EmitT32_32(0xeeb30b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTB{}{}.F16.F64 , ; A1 + if (dt1.Is(F16) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb30b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtb, &Assembler::vcvtb, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTM{}.
.F32
, ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0300U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTM{}.
.F32
, ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0300U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm); +} + +void Assembler::vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTM{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0340U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTM{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0340U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm); +} + +void Assembler::vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTM{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebf0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTM{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebf0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm); +} + +void Assembler::vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTM{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebf0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTM{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebf0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtm, &Assembler::vcvtm, dt1, dt2, rd, rm); +} + +void Assembler::vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTN{}.
.F32
, ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0100U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTN{}.
.F32
, ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0100U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm); +} + +void Assembler::vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTN{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0140U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTN{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0140U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm); +} + +void Assembler::vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTN{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebd0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTN{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebd0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm); +} + +void Assembler::vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTN{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebd0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTN{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebd0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtn, &Assembler::vcvtn, dt1, dt2, rd, rm); +} + +void Assembler::vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTP{}.
.F32
, ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0200U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTP{}.
.F32
, ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0200U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm); +} + +void Assembler::vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_3 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTP{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xffbb0240U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTP{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xf3bb0240U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm); +} + +void Assembler::vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTP{}.
.F32 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitT32_32(0xfebe0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTP{}.
.F32 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F32)) { + EmitA32(0xfebe0a40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm); +} + +void Assembler::vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_op_2 encoded_dt(dt1); + if (IsUsingT32()) { + // VCVTP{}.
.F64 , ; T1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitT32_32(0xfebe0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTP{}.
.F64 , ; A1 + if (encoded_dt.IsValid() && dt2.Is(F64)) { + EmitA32(0xfebe0b40U | (encoded_dt.GetEncodingValue() << 7) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtp, &Assembler::vcvtp, dt1, dt2, rd, rm); +} + +void Assembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTR{}{}.U32.F32 , ; T1 + if (dt1.Is(U32) && dt2.Is(F32)) { + EmitT32_32(0xeebc0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTR{}{}.S32.F32 , ; T1 + if (dt1.Is(S32) && dt2.Is(F32)) { + EmitT32_32(0xeebd0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTR{}{}.U32.F32 , ; A1 + if (dt1.Is(U32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebc0a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTR{}{}.S32.F32 , ; A1 + if (dt1.Is(S32) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ebd0a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtr, &Assembler::vcvtr, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTR{}{}.U32.F64 , ; T1 + if (dt1.Is(U32) && dt2.Is(F64)) { + EmitT32_32(0xeebc0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTR{}{}.S32.F64 , ; T1 + if (dt1.Is(S32) && dt2.Is(F64)) { + EmitT32_32(0xeebd0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTR{}{}.U32.F64 , ; A1 + if (dt1.Is(U32) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ebc0b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTR{}{}.S32.F64 , ; A1 + if (dt1.Is(S32) && dt2.Is(F64) && cond.IsNotNever()) { + 
EmitA32(0x0ebd0b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtr, &Assembler::vcvtr, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTT{}{}.F32.F16 , ; T1 + if (dt1.Is(F32) && dt2.Is(F16)) { + EmitT32_32(0xeeb20ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VCVTT{}{}.F16.F32 , ; T1 + if (dt1.Is(F16) && dt2.Is(F32)) { + EmitT32_32(0xeeb30ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTT{}{}.F32.F16 , ; A1 + if (dt1.Is(F32) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VCVTT{}{}.F16.F32 , ; A1 + if (dt1.Is(F16) && dt2.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb30ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtt, &Assembler::vcvtt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTT{}{}.F64.F16
, ; T1 + if (dt1.Is(F64) && dt2.Is(F16)) { + EmitT32_32(0xeeb20bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTT{}{}.F64.F16
, ; A1 + if (dt1.Is(F64) && dt2.Is(F16) && cond.IsNotNever()) { + EmitA32(0x0eb20bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtt, &Assembler::vcvtt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VCVTT{}{}.F16.F64 , ; T1 + if (dt1.Is(F16) && dt2.Is(F64)) { + EmitT32_32(0xeeb30bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VCVTT{}{}.F16.F64 , ; A1 + if (dt1.Is(F16) && dt2.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb30bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVcvtt, &Assembler::vcvtt, cond, dt1, dt2, rd, rm); +} + +void Assembler::vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VDIV{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee800a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VDIV{}{}.F32 {}, , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e800a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVdiv, &Assembler::vdiv, cond, dt, rd, rn, rm); +} + +void Assembler::vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VDIV{}{}.F64 {
}, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee800b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VDIV{}{}.F64 {
}, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e800b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVdiv, &Assembler::vdiv, cond, dt, rd, rn, rm); +} + +void Assembler::vdup(Condition cond, DataType dt, QRegister rd, Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_B_E_1 encoded_dt(dt); + if (IsUsingT32()) { + // VDUP{}{}.
, ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xeea00b10U | ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitA32(0x0ea00b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rt); +} + +void Assembler::vdup(Condition cond, DataType dt, DRegister rd, Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_B_E_1 encoded_dt(dt); + if (IsUsingT32()) { + // VDUP{}{}.
, ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xee800b10U | ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitA32(0x0e800b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x1) << 5) | + ((encoded_dt.GetEncodingValue() & 0x2) << 21) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rt); +} + +void Assembler::vdup(Condition cond, + DataType dt, + DRegister rd, + DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_imm4_1 encoded_dt(dt, rm); + if (IsUsingT32()) { + // VDUP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00c00U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00c00U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rm); +} + +void Assembler::vdup(Condition cond, + DataType dt, + QRegister rd, + DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_imm4_1 encoded_dt(dt, rm); + if (IsUsingT32()) { + // VDUP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00c40U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VDUP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00c40U | (encoded_dt.GetEncodingValue() << 16) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVdup, &Assembler::vdup, cond, dt, rd, rm); +} + +void Assembler::veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VEOR{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VEOR{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3000110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVeor, &Assembler::veor, cond, dt, rd, rn, rm); +} + +void Assembler::veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VEOR{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VEOR{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3000150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVeor, &Assembler::veor, cond, dt, rd, rn, rm); +} + +void Assembler::vext(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + if (IsUsingT32()) { + // VEXT{}{}.8 {
}, , , # ; T1 + if (dt.Is(Untyped8) && (imm <= 7)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xefb00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + AdvanceIT(); + return; + } + } + // VEXT{}{}.
{
}, , , # ; T1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32)) && + (imm <= (128 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm4 = imm / dt.GetSize(); + EmitT32_32(0xefb00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + AdvanceIT(); + return; + } + } + } else { + // VEXT{}{}.8 {
}, , , # ; A1 + if (dt.Is(Untyped8) && (imm <= 7)) { + if (cond.Is(al)) { + EmitA32(0xf2b00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + return; + } + } + // VEXT{}{}.
{
}, , , # ; A1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32)) && + (imm <= (128 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al)) { + uint32_t imm4 = imm / dt.GetSize(); + EmitA32(0xf2b00000U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + return; + } + } + } + } + } + Delegate(kVext, &Assembler::vext, cond, dt, rd, rn, rm, operand); +} + +void Assembler::vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + if (IsUsingT32()) { + // VEXT{}{}.8 {}, , , # ; T1 + if (dt.Is(Untyped8) && (imm <= 15)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xefb00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + AdvanceIT(); + return; + } + } + // VEXT{}{}.
{}, , , # ; T1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32) || dt.Is(Untyped64)) && + (imm <= (64 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm4 = imm / dt.GetSize(); + EmitT32_32(0xefb00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + AdvanceIT(); + return; + } + } + } else { + // VEXT{}{}.8 {}, , , # ; A1 + if (dt.Is(Untyped8) && (imm <= 15)) { + if (cond.Is(al)) { + EmitA32(0xf2b00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm << 8)); + return; + } + } + // VEXT{}{}.
{}, , , # ; A1 + if ((dt.Is(Untyped16) || dt.Is(Untyped32) || dt.Is(Untyped64)) && + (imm <= (64 / dt.GetSize()) - 1) && ((imm % dt.GetSize()) == 0)) { + if (cond.Is(al)) { + uint32_t imm4 = imm / dt.GetSize(); + EmitA32(0xf2b00040U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0) | (imm4 << 8)); + return; + } + } + } + } + } + Delegate(kVext, &Assembler::vext, cond, dt, rd, rn, rm, operand); +} + +void Assembler::vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMA{}{}.F32
, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000c10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VFMA{}{}.F64
, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xeea00b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFMA{}{}.F32
, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000c10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VFMA{}{}.F64
, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0ea00b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfma, &Assembler::vfma, cond, dt, rd, rn, rm); +} + +void Assembler::vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMA{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000c50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VFMA{}{}.F32 , , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000c50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVfma, &Assembler::vfma, cond, dt, rd, rn, rm); +} + +void Assembler::vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMA{}{}.F32 , , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeea00a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VFMA{}{}.F32 , , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0ea00a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVfma, &Assembler::vfma, cond, dt, rd, rn, rm); +} + +void Assembler::vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VFMS{}{}.F32
    // VFMS{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; T1
    if (dt.Is(F32)) {
      if (cond.Is(al) || AllowStronglyDiscouraged()) {
        EmitT32_32(0xef200c10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                   rm.Encode(5, 0));
        AdvanceIT();
        return;
      }
    }
    // VFMS{<c>}{<q>}.F64 <Ddm>, <Dn>, <Dm> ; T2
    if (dt.Is(F64)) {
      EmitT32_32(0xeea00b40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                 rm.Encode(5, 0));
      AdvanceIT();
      return;
    }
  } else {
    // VFMS{<c>}{<q>}.F32 <Dd>, <Dn>, <Dm> ; A1
    if (dt.Is(F32)) {
      if (cond.Is(al)) {
        EmitA32(0xf2200c10U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                rm.Encode(5, 0));
        return;
      }
    }
    // VFMS{<c>}{<q>}.F64 <Ddm>, <Dn>, <Dm> ; A2
    if (dt.Is(F64) && cond.IsNotNever()) {
      EmitA32(0x0ea00b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
              rn.Encode(7, 16) | rm.Encode(5, 0));
      return;
    }
  }
  // No encoding matched: fall back to the delegate.
  Delegate(kVfms, &Assembler::vfms, cond, dt, rd, rn, rm);
}

// Assemble VFMS (fused multiply-subtract), Q-register variant.
// NOTE(review): generated encoder — opcode constants follow the ARM ARM
// Advanced SIMD VFMS encodings; do not hand-edit.
void Assembler::vfms(
    Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (IsUsingT32()) {
    // VFMS{<c>}{<q>}.F32 <Qd>, <Qn>, <Qm> ; T1
    if (dt.Is(F32)) {
      if (cond.Is(al) || AllowStronglyDiscouraged()) {
        EmitT32_32(0xef200c50U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                   rm.Encode(5, 0));
        AdvanceIT();
        return;
      }
    }
  } else {
    // VFMS{<c>}{<q>}.F32 <Qd>, <Qn>, <Qm> ; A1
    if (dt.Is(F32)) {
      if (cond.Is(al)) {
        EmitA32(0xf2200c50U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                rm.Encode(5, 0));
        return;
      }
    }
  }
  Delegate(kVfms, &Assembler::vfms, cond, dt, rd, rn, rm);
}

// Assemble VFMS (fused multiply-subtract), S-register variant.
// VFP form: unconditional in T32 (T2), conditional in A32 (A2).
void Assembler::vfms(
    Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (IsUsingT32()) {
    // VFMS{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; T2
    if (dt.Is(F32)) {
      EmitT32_32(0xeea00a40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                 rm.Encode(5, 0));
      AdvanceIT();
      return;
    }
  } else {
    // VFMS{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; A2
    if (dt.Is(F32) && cond.IsNotNever()) {
      EmitA32(0x0ea00a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
              rn.Encode(7, 16) | rm.Encode(5, 0));
      return;
    }
  }
  Delegate(kVfms, &Assembler::vfms, cond, dt, rd, rn, rm);
}

// Assemble VFNMA (fused negated multiply-accumulate), S-register variant.
void Assembler::vfnma(
    Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (IsUsingT32()) {
    // VFNMA{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; T1
    if (dt.Is(F32)) {
      EmitT32_32(0xee900a40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                 rm.Encode(5, 0));
      AdvanceIT();
      return;
    }
  } else {
    // VFNMA{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; A1
    if (dt.Is(F32) && cond.IsNotNever()) {
      EmitA32(0x0e900a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
              rn.Encode(7, 16) |
              rm.Encode(5, 0));
      return;
    }
  }
  // No encoding matched: fall back to the delegate.
  Delegate(kVfnma, &Assembler::vfnma, cond, dt, rd, rn, rm);
}

// Assemble VFNMA (fused negated multiply-accumulate), D-register variant.
// Only F64 has an encoding here; anything else delegates.
void Assembler::vfnma(
    Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (IsUsingT32()) {
    // VFNMA{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; T1
    if (dt.Is(F64)) {
      EmitT32_32(0xee900b40U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                 rm.Encode(5, 0));
      AdvanceIT();
      return;
    }
  } else {
    // VFNMA{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; A1
    if (dt.Is(F64) && cond.IsNotNever()) {
      EmitA32(0x0e900b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
              rn.Encode(7, 16) | rm.Encode(5, 0));
      return;
    }
  }
  Delegate(kVfnma, &Assembler::vfnma, cond, dt, rd, rn, rm);
}

// Assemble VFNMS (fused negated multiply-subtract), S-register variant.
void Assembler::vfnms(
    Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (IsUsingT32()) {
    // VFNMS{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; T1
    if (dt.Is(F32)) {
      EmitT32_32(0xee900a00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                 rm.Encode(5, 0));
      AdvanceIT();
      return;
    }
  } else {
    // VFNMS{<c>}{<q>}.F32 <Sd>, <Sn>, <Sm> ; A1
    if (dt.Is(F32) && cond.IsNotNever()) {
      EmitA32(0x0e900a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
              rn.Encode(7, 16) | rm.Encode(5, 0));
      return;
    }
  }
  Delegate(kVfnms, &Assembler::vfnms, cond, dt, rd, rn, rm);
}

// Assemble VFNMS (fused negated multiply-subtract), D-register variant.
// Only F64 has an encoding here; anything else delegates.
void Assembler::vfnms(
    Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (IsUsingT32()) {
    // VFNMS{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; T1
    if (dt.Is(F64)) {
      EmitT32_32(0xee900b00U | rd.Encode(22, 12) | rn.Encode(7, 16) |
                 rm.Encode(5, 0));
      AdvanceIT();
      return;
    }
  } else {
    // VFNMS{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; A1
    // VFNMS{<c>}{<q>}.F64 <Dd>, <Dn>, <Dm> ; A1
    if (dt.Is(F64) && cond.IsNotNever()) {
      EmitA32(0x0e900b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) |
              rn.Encode(7, 16) | rm.Encode(5, 0));
      return;
    }
  }
  // No encoding matched: fall back to the delegate.
  Delegate(kVfnms, &Assembler::vfnms, cond, dt, rd, rn, rm);
}

// Assemble VHADD (halving add), D-register variant.
// NOTE(review): generated encoder — encoded_dt packs the U bit and the
// size field; bit 2 (U) is placed at a different shift in T32 vs A32.
void Assembler::vhadd(
    Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  // Translate <dt> into the U:size encoding; invalid dt means "delegate".
  Dt_U_size_1 encoded_dt(dt);
  if (IsUsingT32()) {
    // VHADD{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al) || AllowStronglyDiscouraged()) {
        EmitT32_32(0xef000000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        AdvanceIT();
        return;
      }
    }
  } else {
    // VHADD{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al)) {
        EmitA32(0xf2000000U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                ((encoded_dt.GetEncodingValue() & 0x4) << 22) |
                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        return;
      }
    }
  }
  Delegate(kVhadd, &Assembler::vhadd, cond, dt, rd, rn, rm);
}

// Assemble VHADD (halving add), Q-register variant.
void Assembler::vhadd(
    Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  Dt_U_size_1 encoded_dt(dt);
  if (IsUsingT32()) {
    // VHADD{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; T1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al) || AllowStronglyDiscouraged()) {
        EmitT32_32(0xef000040U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        AdvanceIT();
        return;
      }
    }
  } else {
    // VHADD{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; A1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al)) {
        EmitA32(0xf2000040U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                ((encoded_dt.GetEncodingValue() & 0x4) << 22) |
                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        return;
      }
    }
  }
  Delegate(kVhadd, &Assembler::vhadd, cond, dt, rd, rn, rm);
}

// Assemble VHSUB (halving subtract), D-register variant.
void Assembler::vhsub(
    Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  Dt_U_size_1 encoded_dt(dt);
  if (IsUsingT32()) {
    // VHSUB{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; T1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al) || AllowStronglyDiscouraged()) {
        EmitT32_32(0xef000200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        AdvanceIT();
        return;
      }
    }
  } else {
    // VHSUB{<c>}{<q>}.<dt> {<Dd>}, <Dn>, <Dm> ; A1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al)) {
        EmitA32(0xf2000200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                ((encoded_dt.GetEncodingValue() & 0x4) << 22) |
                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        return;
      }
    }
  }
  Delegate(kVhsub, &Assembler::vhsub, cond, dt, rd, rn, rm);
}

// Assemble VHSUB (halving subtract), Q-register variant.
void Assembler::vhsub(
    Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  Dt_U_size_1 encoded_dt(dt);
  if (IsUsingT32()) {
    // VHSUB{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; T1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al) || AllowStronglyDiscouraged()) {
        EmitT32_32(0xef000240U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                   ((encoded_dt.GetEncodingValue() & 0x4) << 26) |
                   rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        AdvanceIT();
        return;
      }
    }
  } else {
    // VHSUB{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; A1
    // VHSUB{<c>}{<q>}.<dt> {<Qd>}, <Qn>, <Qm> ; A1
    if (encoded_dt.IsValid()) {
      if (cond.Is(al)) {
        EmitA32(0xf2000240U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) |
                ((encoded_dt.GetEncodingValue() & 0x4) << 22) |
                rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0));
        return;
      }
    }
  }
  // No encoding matched: fall back to the delegate.
  Delegate(kVhsub, &Assembler::vhsub, cond, dt, rd, rn, rm);
}

// Assemble VLD1 (load single-element structures).
// Three addressing shapes are handled: [Rn{:align}] (offset),
// [Rn{:align}]! (post-index by transfer size), and [Rn{:align}], Rm
// (post-index by register). Within each, the transfer kind selects the
// encoding: multiple lanes, all lanes (duplicate), or one lane.
// NOTE(review): generated encoder — opcode constants and len encodings
// follow the ARM ARM Advanced SIMD VLD1 tables; do not hand-edit.
void Assembler::vld1(Condition cond,
                     DataType dt,
                     const NeonRegisterList& nreglist,
                     const AlignedMemOperand& operand) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (operand.IsImmediateZero()) {
    Register rn = operand.GetBaseRegister();
    Alignment align = operand.GetAlignment();
    // Size encodings for the multiple-lanes and all/one-lane forms differ.
    Dt_size_6 encoded_dt(dt);
    Dt_size_7 encoded_dt_2(dt);
    Align_align_1 encoded_align_1(align, nreglist);
    Align_a_1 encoded_align_2(align, dt);
    Align_index_align_1 encoded_align_3(align, nreglist, dt);
    if (IsUsingT32()) {
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; T1
      if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) &&
          operand.IsOffset() && encoded_align_1.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding;
          switch (nreglist.GetLength()) {
            default:
              VIXL_UNREACHABLE_OR_FALLTHROUGH();
            case 1:
              len_encoding = 0x7;
              break;
            case 2:
              len_encoding = 0xa;
              break;
            case 3:
              len_encoding = 0x6;
              break;
            case 4:
              len_encoding = 0x2;
              break;
          }
          EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) |
                     (encoded_align_1.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (len_encoding << 8) |
                     (rn.GetCode() << 16));
          AdvanceIT();
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1
      if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) &&
          operand.IsPostIndex() && encoded_align_1.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding;
          switch (nreglist.GetLength()) {
            default:
              VIXL_UNREACHABLE_OR_FALLTHROUGH();
            case 1:
              len_encoding = 0x7;
              break;
            case 2:
              len_encoding = 0xa;
              break;
            case 3:
              len_encoding = 0x6;
              break;
            case 4:
              len_encoding = 0x2;
              break;
          }
          EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) |
                     (encoded_align_1.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (len_encoding << 8) |
                     (rn.GetCode() << 16));
          AdvanceIT();
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; T1 (all lanes)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) &&
          operand.IsOffset() && encoded_align_2.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding = nreglist.GetLength() - 1;
          EmitT32_32(0xf9a00c0fU | (encoded_dt_2.GetEncodingValue() << 6) |
                     (encoded_align_2.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (len_encoding << 5) |
                     (rn.GetCode() << 16));
          AdvanceIT();
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 (all lanes)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) &&
          operand.IsPostIndex() && encoded_align_2.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding = nreglist.GetLength() - 1;
          EmitT32_32(0xf9a00c0dU | (encoded_dt_2.GetEncodingValue() << 6) |
                     (encoded_align_2.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (len_encoding << 5) |
                     (rn.GetCode() << 16));
          AdvanceIT();
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; T1 (one lane)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() &&
          (nreglist.GetLength() == 1) && operand.IsOffset() &&
          encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          EmitT32_32(0xf9a0000fU | (encoded_dt_2.GetEncodingValue() << 10) |
                     (encoded_align_3.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (rn.GetCode() << 16));
          AdvanceIT();
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; T1 (one lane)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() &&
          (nreglist.GetLength() == 1) && operand.IsPostIndex() &&
          encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          EmitT32_32(0xf9a0000dU | (encoded_dt_2.GetEncodingValue() << 10) |
                     (encoded_align_3.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (rn.GetCode() << 16));
          AdvanceIT();
          return;
        }
      }
    } else {
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; A1
      if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) &&
          operand.IsOffset() && encoded_align_1.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding;
          switch (nreglist.GetLength()) {
            default:
              VIXL_UNREACHABLE_OR_FALLTHROUGH();
            case 1:
              len_encoding = 0x7;
              break;
            case 2:
              len_encoding = 0xa;
              break;
            case 3:
              len_encoding = 0x6;
              break;
            case 4:
              len_encoding = 0x2;
              break;
          }
          EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) |
                  (encoded_align_1.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (len_encoding << 8) |
                  (rn.GetCode() << 16));
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; A1
      if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) &&
          operand.IsPostIndex() && encoded_align_1.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding;
          switch (nreglist.GetLength()) {
            default:
              VIXL_UNREACHABLE_OR_FALLTHROUGH();
            case 1:
              len_encoding = 0x7;
              break;
            case 2:
              len_encoding = 0xa;
              break;
            case 3:
              len_encoding = 0x6;
              break;
            case 4:
              len_encoding = 0x2;
              break;
          }
          EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) |
                  (encoded_align_1.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (len_encoding << 8) |
                  (rn.GetCode() << 16));
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; A1 (all lanes)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) &&
          operand.IsOffset() && encoded_align_2.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding = nreglist.GetLength() - 1;
          EmitA32(0xf4a00c0fU | (encoded_dt_2.GetEncodingValue() << 6) |
                  (encoded_align_2.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (len_encoding << 5) |
                  (rn.GetCode() << 16));
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; A1 (all lanes)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) &&
          operand.IsPostIndex() && encoded_align_2.IsValid() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding = nreglist.GetLength() - 1;
          EmitA32(0xf4a00c0dU | (encoded_dt_2.GetEncodingValue() << 6) |
                  (encoded_align_2.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (len_encoding << 5) |
                  (rn.GetCode() << 16));
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}] ; A1 (one lane)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() &&
          (nreglist.GetLength() == 1) && operand.IsOffset() &&
          encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          EmitA32(0xf4a0000fU | (encoded_dt_2.GetEncodingValue() << 10) |
                  (encoded_align_3.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (rn.GetCode() << 16));
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}]! ; A1 (one lane)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() &&
          (nreglist.GetLength() == 1) && operand.IsPostIndex() &&
          encoded_align_3.IsValid() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          EmitA32(0xf4a0000dU | (encoded_dt_2.GetEncodingValue() << 10) |
                  (encoded_align_3.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (rn.GetCode() << 16));
          return;
        }
      }
    }
  }
  if (operand.IsPlainRegister()) {
    Register rn = operand.GetBaseRegister();
    Alignment align = operand.GetAlignment();
    Register rm = operand.GetOffsetRegister();
    Dt_size_6 encoded_dt(dt);
    Dt_size_7 encoded_dt_2(dt);
    Align_align_1 encoded_align_1(align, nreglist);
    Align_a_1 encoded_align_2(align, dt);
    Align_index_align_1 encoded_align_3(align, nreglist, dt);
    if (IsUsingT32()) {
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}], <Rm> ; T1
      if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) &&
          !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding;
          switch (nreglist.GetLength()) {
            default:
              VIXL_UNREACHABLE_OR_FALLTHROUGH();
            case 1:
              len_encoding = 0x7;
              break;
            case 2:
              len_encoding = 0xa;
              break;
            case 3:
              len_encoding = 0x6;
              break;
            case 4:
              len_encoding = 0x2;
              break;
          }
          EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) |
                     (encoded_align_1.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (len_encoding << 8) |
                     (rn.GetCode() << 16) | rm.GetCode());
          AdvanceIT();
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}], <Rm> ; T1 (all lanes)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) &&
          !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding = nreglist.GetLength() - 1;
          EmitT32_32(0xf9a00c00U | (encoded_dt_2.GetEncodingValue() << 6) |
                     (encoded_align_2.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (len_encoding << 5) |
                     (rn.GetCode() << 16) | rm.GetCode());
          AdvanceIT();
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}], <Rm> ; T1 (one lane)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() &&
          (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al) || AllowStronglyDiscouraged()) {
          const DRegister& first = nreglist.GetFirstDRegister();
          EmitT32_32(0xf9a00000U | (encoded_dt_2.GetEncodingValue() << 10) |
                     (encoded_align_3.GetEncodingValue() << 4) |
                     first.Encode(22, 12) | (rn.GetCode() << 16) |
                     rm.GetCode());
          AdvanceIT();
          return;
        }
      }
    } else {
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}], <Rm> ; A1
      if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) &&
          !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding;
          switch (nreglist.GetLength()) {
            default:
              VIXL_UNREACHABLE_OR_FALLTHROUGH();
            case 1:
              len_encoding = 0x7;
              break;
            case 2:
              len_encoding = 0xa;
              break;
            case 3:
              len_encoding = 0x6;
              break;
            case 4:
              len_encoding = 0x2;
              break;
          }
          EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) |
                  (encoded_align_1.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (len_encoding << 8) |
                  (rn.GetCode() << 16) | rm.GetCode());
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}], <Rm> ; A1 (all lanes)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() &&
          (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 2) &&
          !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          uint32_t len_encoding = nreglist.GetLength() - 1;
          EmitA32(0xf4a00c00U | (encoded_dt_2.GetEncodingValue() << 6) |
                  (encoded_align_2.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (len_encoding << 5) |
                  (rn.GetCode() << 16) | rm.GetCode());
          return;
        }
      }
      // VLD1{<c>}{<q>}.<dt> <list>, [<Rn>{:<align>}], <Rm> ; A1 (one lane)
      if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() &&
          (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() &&
          (!rn.IsPC() || AllowUnpredictable())) {
        if (cond.Is(al)) {
          const DRegister& first = nreglist.GetFirstDRegister();
          EmitA32(0xf4a00000U | (encoded_dt_2.GetEncodingValue() << 10) |
                  (encoded_align_3.GetEncodingValue() << 4) |
                  first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode());
          return;
        }
      }
    }
  }
  // No encoding matched: fall back to the delegate.
  Delegate(kVld1, &Assembler::vld1, cond, dt, nreglist, operand);
}

// Assemble VLD2 (load two-element structures). Same addressing shapes as
// vld1; register lists may be single- or double-spaced.
void Assembler::vld2(Condition cond,
                     DataType dt,
                     const NeonRegisterList& nreglist,
                     const AlignedMemOperand& operand) {
  VIXL_ASSERT(AllowAssembler());
  CheckIT(cond);
  if (operand.IsImmediateZero()) {
    Register rn = operand.GetBaseRegister();
    Alignment align = operand.GetAlignment();
    Dt_size_7 encoded_dt(dt);
    Align_align_2 encoded_align_1(align, nreglist);
    Align_a_2 encoded_align_2(align, dt);
    Align_index_align_2 encoded_align_3(align, nreglist, dt);
    if (IsUsingT32()) {
      // VLD2{<c>}{<q>}.<dt>
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00d0fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00d0dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00d0fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00d0dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VLD2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_2 encoded_align_1(align, nreglist); + Align_a_2 encoded_align_2(align, dt); + Align_index_align_2 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00d00U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a00100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00d00U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a00100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld2, &Assembler::vld2, cond, dt, nreglist, operand); +} + +void Assembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VLD3{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VLD3{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld3, &Assembler::vld3, cond, dt, nreglist, operand); +} + +void Assembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VLD3{}{}.
, [] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00e0fU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, []! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00e0dU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, [] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, []! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00e0fU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, []! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00e0dU | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, [] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VLD3{}{}.
, []! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Sign sign = operand.GetSign(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VLD3{}{}.
, [], # ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00e00U | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD3{}{}.
, [], # ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a00200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD3{}{}.
, [], # ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00e00U | (encoded_dt.GetEncodingValue() << 6) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD3{}{}.
, [], # ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a00200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld3, &Assembler::vld3, cond, dt, nreglist, operand); +} + +void Assembler::vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Dt_size_8 encoded_dt_2(dt, align); + Align_align_4 encoded_align_1(align); + Align_a_3 encoded_align_2(align, dt); + Align_index_align_3 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD4{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf920000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf920000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}] ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00f0fU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}]! ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00f0dU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a0030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VLD4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf420000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf420000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}] ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00f0fU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}]! ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00f0dU | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VLD4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_3.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a0030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Dt_size_8 encoded_dt_2(dt, align); + Align_align_4 encoded_align_1(align); + Align_a_3 encoded_align_2(align, dt); + Align_index_align_3 encoded_align_3(align, nreglist, dt); + if (IsUsingT32()) { + // VLD4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}], ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9a00f00U | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VLD4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9a00300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VLD4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4200000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD4{}{}.
, [{:}], ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferAllLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4a00f00U | (encoded_dt_2.GetEncodingValue() << 6) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 5) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VLD4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4a00300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_3.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVld4, &Assembler::vld4, cond, dt, nreglist, operand); +} + +void Assembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDM{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec900b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDM{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c900b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldm, &Assembler::vldm, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDM{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || 
AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec900a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDM{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c900a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldm, &Assembler::vldm, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDMDB{}{}{.} !, ; T1 + if (write_back.DoesWriteBack() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xed300b00U | (rn.GetCode() << 16) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMDB{}{}{.} !, ; A1 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0d300b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + dreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVldmdb, &Assembler::vldmdb, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + 
USE(dt); + if (IsUsingT32()) { + // VLDMDB{}{}{.} !, ; T2 + if (write_back.DoesWriteBack() && (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xed300a00U | (rn.GetCode() << 16) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMDB{}{}{.} !, ; A2 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0d300a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + sreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVldmdb, &Assembler::vldmdb, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDMIA{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec900b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMIA{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c900b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldmia, &Assembler::vldmia, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack 
write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VLDMIA{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec900a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VLDMIA{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c900a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVldmia, &Assembler::vldmia, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vldr(Condition cond, + DataType dt, + DRegister rd, + Location* location) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Location::Offset offset = + location->IsBound() + ? location->GetLocation() - + AlignDown(GetCursorOffset() + GetArchitectureStatePCOffset(), 4) + : 0; + if (IsUsingT32()) { + // VLDR{}{}{.64}
,
,
,
,
, [PC, #<_plusminus_>] ; T1 + if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && rn.Is(pc) && operand.IsOffset()) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitT32_32(0xed1f0b00U | rd.Encode(22, 12) | offset_ | (sign << 23)); + AdvanceIT(); + return; + } + // VLDR{}{}{.64}
, [{, #{+/-}}] ; T1 + if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && operand.IsOffset() && + ((rn.GetCode() & 0xf) != 0xf)) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitT32_32(0xed100b00U | rd.Encode(22, 12) | (rn.GetCode() << 16) | + offset_ | (sign << 23)); + AdvanceIT(); + return; + } + } else { + // VLDR{}{}{.64}
, [PC, #<_plusminus_>] ; A1 + if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && rn.Is(pc) && operand.IsOffset() && + cond.IsNotNever()) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitA32(0x0d1f0b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + offset_ | (sign << 23)); + return; + } + // VLDR{}{}{.64}
, [{, #{+/-}}] ; A1 + if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && operand.IsOffset() && cond.IsNotNever() && + ((rn.GetCode() & 0xf) != 0xf)) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitA32(0x0d100b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + (rn.GetCode() << 16) | offset_ | (sign << 23)); + return; + } + } + } + Delegate(kVldr, &Assembler::vldr, cond, dt, rd, operand); +} + +void Assembler::vldr(Condition cond, + DataType dt, + SRegister rd, + Location* location) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Location::Offset offset = + location->IsBound() + ? location->GetLocation() - + AlignDown(GetCursorOffset() + GetArchitectureStatePCOffset(), 4) + : 0; + if (IsUsingT32()) { + // VLDR{}{}{.32} ,
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMAX{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000600U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMAX{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMAX{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000600U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmax, &Assembler::vmax, cond, dt, rd, rn, rm); +} + +void Assembler::vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMAX{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMAX{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000640U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMAX{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMAX{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000640U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmax, &Assembler::vmax, cond, dt, rd, rn, rm); +} + +void Assembler::vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMAXNM{}.F32
, , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xff000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMAXNM{}.F64
, , ; T2 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe800b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMAXNM{}.F32
, , ; A1 + if (dt.Is(F32)) { + EmitA32(0xf3000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + // VMAXNM{}.F64
, , ; A2 + if (dt.Is(F64)) { + EmitA32(0xfe800b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVmaxnm, &Assembler::vmaxnm, dt, rd, rn, rm); +} + +void Assembler::vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMAXNM{}.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xff000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMAXNM{}.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xf3000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVmaxnm, &Assembler::vmaxnm, dt, rd, rn, rm); +} + +void Assembler::vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMAXNM{}.F32 , , ; T2 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe800a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMAXNM{}.F32 , , ; A2 + if (dt.Is(F32)) { + EmitA32(0xfe800a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVmaxnm, &Assembler::vmaxnm, dt, rd, rn, rm); +} + +void Assembler::vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMIN{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMIN{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000610U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMIN{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMIN{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000610U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmin, &Assembler::vmin, cond, dt, rd, rn, rm); +} + +void Assembler::vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMIN{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMIN{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000650U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMIN{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMIN{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000650U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmin, &Assembler::vmin, cond, dt, rd, rn, rm); +} + +void Assembler::vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMINNM{}.F32
, , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xff200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMINNM{}.F64
, , ; T2 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe800b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMINNM{}.F32
, , ; A1 + if (dt.Is(F32)) { + EmitA32(0xf3200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + // VMINNM{}.F64
, , ; A2 + if (dt.Is(F64)) { + EmitA32(0xfe800b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVminnm, &Assembler::vminnm, dt, rd, rn, rm); +} + +void Assembler::vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMINNM{}.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xff200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMINNM{}.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xf3200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVminnm, &Assembler::vminnm, dt, rd, rn, rm); +} + +void Assembler::vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VMINNM{}.F32 , , ; T2 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe800a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMINNM{}.F32 , , ; A2 + if (dt.Is(F32)) { + EmitA32(0xfe800a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVminnm, &Assembler::vminnm, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}.
, , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLA{}{}.
, , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLA{}{}. , , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf3800040U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}.F32
, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMLA{}{}.F64
, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xee000b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMLA{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLA{}{}.F32
, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMLA{}{}.F64
, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e000b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + // VMLA{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLA{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMLA{}{}. , , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLA{}{}.F32 , , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMLA{}{}. 
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMLA{}{}.F32 , , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee000a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMLA{}{}.F32 , , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e000a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmla, &Assembler::vmla, cond, dt, rd, rn, rm); +} + +void Assembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_11 encoded_dt(dt); + if (IsUsingT32()) { + // VMLAL{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800240U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLAL{}{}. 
, , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800240U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmlal, &Assembler::vmlal, cond, dt, rd, rn, rm); +} + +void Assembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_12 encoded_dt(dt); + if (IsUsingT32()) { + // VMLAL{}{}. , , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800800U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLAL{}{}. , , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800800U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmlal, &Assembler::vmlal, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}.
, , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLS{}{}.
, , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_9 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLS{}{}. , , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf3800440U | (encoded_dt.GetTypeEncodingValue() << 8) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}.F32
, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200d10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMLS{}{}.F64
, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xee000b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMLS{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLS{}{}.F32
, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200d10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMLS{}{}.F64
, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e000b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + // VMLS{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_10 encoded_dt(dt); + if (IsUsingT32()) { + // VMLS{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMLS{}{}. , , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLS{}{}.F32 , , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMLS{}{}. 
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000940U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMLS{}{}.F32 , , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee000a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMLS{}{}.F32 , , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e000a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmls, &Assembler::vmls, cond, dt, rd, rn, rm); +} + +void Assembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_11 encoded_dt(dt); + if (IsUsingT32()) { + // VMLSL{}{}. , , ; T1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800640U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLSL{}{}. 
, , ; A1 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1)))) { + if (cond.Is(al)) { + EmitA32(0xf2800640U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVmlsl, &Assembler::vmlsl, cond, dt, rd, rn, rm); +} + +void Assembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_12 encoded_dt(dt); + if (IsUsingT32()) { + // VMLSL{}{}. , , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800a00U | (encoded_dt.GetTypeEncodingValue() << 28) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMLSL{}{}. 
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800a00U | (encoded_dt.GetTypeEncodingValue() << 24) | + (encoded_dt.GetEncodingValue() << 20) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmlsl, &Assembler::vmlsl, cond, dt, rd, rn, rm); +} + +void Assembler::vmov(Condition cond, Register rt, SRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , ; T1 + if ((!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee100a10U | (rt.GetCode() << 12) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , ; A1 + if (cond.IsNotNever() && (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e100a10U | (cond.GetCondition() << 28) | (rt.GetCode() << 12) | + rn.Encode(7, 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rt, rn); +} + +void Assembler::vmov(Condition cond, SRegister rn, Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , ; T1 + if ((!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee000a10U | rn.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , ; A1 + if (cond.IsNotNever() && (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e000a10U | (cond.GetCondition() << 28) | rn.Encode(7, 16) | + (rt.GetCode() << 12)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rn, rt); +} + +void Assembler::vmov(Condition cond, Register rt, Register rt2, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , ; T1 + if (((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitT32_32(0xec500b10U | (rt.GetCode() << 12) | (rt2.GetCode() << 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , ; A1 + if (cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c500b10U | (cond.GetCondition() << 
28) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rt, rt2, rm); +} + +void Assembler::vmov(Condition cond, DRegister rm, Register rt, Register rt2) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , ; T1 + if (((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitT32_32(0xec400b10U | rm.Encode(5, 0) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , ; A1 + if (cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c400b10U | (cond.GetCondition() << 28) | rm.Encode(5, 0) | + (rt.GetCode() << 12) | (rt2.GetCode() << 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rm, rt, rt2); +} + +void Assembler::vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , , ; T1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitT32_32(0xec500a10U | (rt.GetCode() << 12) | (rt2.GetCode() << 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , , ; A1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c500a10U | (cond.GetCondition() << 28) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rt, rt2, rm, rm1); +} + +void Assembler::vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMOV{}{} , , , ; T1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + ((!rt.IsPC() && !rt2.IsPC()) || 
AllowUnpredictable())) { + EmitT32_32(0xec400a10U | rm.Encode(5, 0) | (rt.GetCode() << 12) | + (rt2.GetCode() << 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{} , , , ; A1 + if ((((rm.GetCode() + 1) % kNumberOfSRegisters) == rm1.GetCode()) && + cond.IsNotNever() && + ((!rt.IsPC() && !rt2.IsPC()) || AllowUnpredictable())) { + EmitA32(0x0c400a10U | (cond.GetCondition() << 28) | rm.Encode(5, 0) | + (rt.GetCode() << 12) | (rt2.GetCode() << 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, rm, rm1, rt, rt2); +} + +void Assembler::vmov(Condition cond, + DataType dt, + DRegisterLane rd, + Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_opc1_opc2_1 encoded_dt(dt, rd); + if (IsUsingT32()) { + // VMOV{}{}{.} , ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee000b10U | ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | + rd.Encode(7, 16) | (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}{.} , ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e000b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | rd.Encode(7, 16) | + (rt.GetCode() << 12)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rd, rt); +} + +void Assembler::vmov(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVmov encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMOV{}{}.
, # ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32( + 0xef800010U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) | + ((encoded_dt.GetEncodingValue() & 0x10) << 1) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VMOV{}{}.
, # ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800010U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) | + ((encoded_dt.GetEncodingValue() & 0x10) << 1) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsImmediate()) { + ImmediateVFP vfp(operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMOV{}{}.F64
, # ; T2 + if (dt.Is(F64) && vfp.IsValid()) { + EmitT32_32(0xeeb00b00U | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}.F64
, # ; A2 + if (dt.Is(F64) && vfp.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb00b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + return; + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VMOV{}{}.F64
, ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xeeb00b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMOV{}{}{.
}
, ; T1 + if (!dt.Is(F64)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMOV{}{}.F64
, ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb00b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + // VMOV{}{}{.
}
, ; A1 + if (!dt.Is(F64)) { + if (cond.Is(al)) { + EmitA32(0xf2200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rd, operand); +} + +void Assembler::vmov(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVmov encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMOV{}{}.
, # ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32( + 0xef800050U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) | + ((encoded_dt.GetEncodingValue() & 0x10) << 1) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VMOV{}{}.
, # ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800050U | ((encoded_dt.GetEncodingValue() & 0xf) << 8) | + ((encoded_dt.GetEncodingValue() & 0x10) << 1) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VMOV{}{}{.
} , ; T1 + if (!dt.Is(F64)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMOV{}{}{.
} , ; A1 + if (!dt.Is(F64)) { + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rd, operand); +} + +void Assembler::vmov(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVFP vfp(operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMOV{}{}.F32 , # ; T2 + if (dt.Is(F32) && vfp.IsValid()) { + EmitT32_32(0xeeb00a00U | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}.F32 , # ; A2 + if (dt.Is(F32) && vfp.IsValid() && cond.IsNotNever()) { + EmitA32(0x0eb00a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + (vfp.GetEncodingValue() & 0xf) | + ((vfp.GetEncodingValue() & 0xf0) << 12)); + return; + } + } + } + if (operand.IsRegister()) { + SRegister rm = operand.GetRegister(); + if (IsUsingT32()) { + // VMOV{}{}.F32 , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeeb00a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}.F32 , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb00a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rd, operand); +} + +void Assembler::vmov(Condition cond, + DataType dt, + Register rt, + DRegisterLane rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_opc1_opc2_1 encoded_dt(dt, rn); + if (IsUsingT32()) { + // VMOV{}{}{.
} , ; T1 + if (encoded_dt.IsValid() && (!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xee100b10U | ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | + ((encoded_dt.GetEncodingValue() & 0x10) << 19) | + (rt.GetCode() << 12) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } else { + // VMOV{}{}{.
} , ; A1 + if (encoded_dt.IsValid() && cond.IsNotNever() && + (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0e100b10U | (cond.GetCondition() << 28) | + ((encoded_dt.GetEncodingValue() & 0x3) << 5) | + ((encoded_dt.GetEncodingValue() & 0xc) << 19) | + ((encoded_dt.GetEncodingValue() & 0x10) << 19) | + (rt.GetCode() << 12) | rn.Encode(7, 16)); + return; + } + } + Delegate(kVmov, &Assembler::vmov, cond, dt, rt, rn); +} + +void Assembler::vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_imm3H_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMOVL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800a10U | ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 25) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMOVL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800a10U | ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 21) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmovl, &Assembler::vmovl, cond, dt, rd, rm); +} + +void Assembler::vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VMOVN{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20200U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMOVN{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20200U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmovn, &Assembler::vmovn, cond, dt, rd, rm); +} + +void Assembler::vmrs(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMRS{}{} , ; T1 + EmitT32_32(0xeef00a10U | (rt.GetCode() << 12) | (spec_reg.GetReg() << 16)); + AdvanceIT(); + return; + } else { + // VMRS{}{} , ; A1 + if (cond.IsNotNever()) { + EmitA32(0x0ef00a10U | (cond.GetCondition() << 28) | (rt.GetCode() << 12) | + (spec_reg.GetReg() << 16)); + return; + } + } + Delegate(kVmrs, &Assembler::vmrs, cond, rt, spec_reg); +} + +void Assembler::vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMSR{}{} , ; T1 + if ((!rt.IsPC() || AllowUnpredictable())) { + EmitT32_32(0xeee00a10U | (spec_reg.GetReg() << 16) | + (rt.GetCode() << 12)); + AdvanceIT(); + return; + } + } else { + // VMSR{}{} , ; A1 + if (cond.IsNotNever() && (!rt.IsPC() || AllowUnpredictable())) { + EmitA32(0x0ee00a10U | (cond.GetCondition() << 28) | + (spec_reg.GetReg() << 16) | (rt.GetCode() << 12)); + return; + } + } + Delegate(kVmsr, &Assembler::vmsr, cond, spec_reg, rt); +} + +void Assembler::vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VMUL{}{}.
{
}, , [] ; T1 + if (encoded_dt.IsValid() && + ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xef800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VMUL{}{}.
{
}, , [] ; A1 + if (encoded_dt.IsValid() && + ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf2800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, dm, index); +} + +void Assembler::vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VMUL{}{}.
{}, , [] ; T1 + if (encoded_dt.IsValid() && + ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xff800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VMUL{}{}.
{}, , [] ; A1 + if (encoded_dt.IsValid() && + ((dt.Is(I16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(I16) && (index <= 1) && (dm.GetCode() <= 15)))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf3800840U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, dm, index); +} + +void Assembler::vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMUL{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMUL{}{}.F64 {
}, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xee200b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VMUL{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000910U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMUL{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000d10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMUL{}{}.F64 {
}, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e200b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + // VMUL{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000910U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, rm); +} + +void Assembler::vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMUL{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VMUL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000950U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMUL{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000d50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VMUL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000950U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, rm); +} + +void Assembler::vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VMUL{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee200a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMUL{}{}.F32 {}, , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e200a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVmul, &Assembler::vmul, cond, dt, rd, rn, rm); +} + +void Assembler::vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VMULL{}{}.
, , [] ; T1 + if (encoded_dt.IsValid() && + (((dt.Is(S16) || dt.Is(U16)) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && !dt.Is(U16) && (index <= 1) && + (dm.GetCode() <= 15)))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xef800a40U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VMULL{}{}.
, , [] ; A1 + if (encoded_dt.IsValid() && + (((dt.Is(S16) || dt.Is(U16)) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && !dt.Is(U16) && (index <= 1) && + (dm.GetCode() <= 15)))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf2800a40U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVmull, &Assembler::vmull, cond, dt, rd, rn, dm, index); +} + +void Assembler::vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VMULL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800c00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + ((encoded_dt.GetEncodingValue() & 0x8) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VMULL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800c00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + ((encoded_dt.GetEncodingValue() & 0x8) << 6) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmull, &Assembler::vmull, cond, dt, rd, rn, rm); +} + +void Assembler::vmvn(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVmvn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMVN{}{}.
, # ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VMVN{}{}.
, # ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800030U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VMVN{}{}{.
}
, ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00580U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMVN{}{}{.
}
, ; A1 + if (cond.Is(al)) { + EmitA32(0xf3b00580U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmvn, &Assembler::vmvn, cond, dt, rd, operand); +} + +void Assembler::vmvn(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVmvn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VMVN{}{}.
, # ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VMVN{}{}.
, # ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800070U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VMVN{}{}{.
} , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb005c0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VMVN{}{}{.
} , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3b005c0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVmvn, &Assembler::vmvn, cond, dt, rd, operand); +} + +void Assembler::vneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VNEG{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb10380U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VNEG{}{}.F64
, ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xeeb10b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNEG{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b10380U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VNEG{}{}.F64
, ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb10b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVneg, &Assembler::vneg, cond, dt, rd, rm); +} + +void Assembler::vneg(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VNEG{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb103c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VNEG{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b103c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 8) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVneg, &Assembler::vneg, cond, dt, rd, rm); +} + +void Assembler::vneg(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNEG{}{}.F32 , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xeeb10a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNEG{}{}.F32 , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb10a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVneg, &Assembler::vneg, cond, dt, rd, rm); +} + +void Assembler::vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLA{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee100a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLA{}{}.F32 , , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e100a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmla, &Assembler::vnmla, cond, dt, rd, rn, rm); +} + +void Assembler::vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLA{}{}.F64
, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee100b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLA{}{}.F64
, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e100b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmla, &Assembler::vnmla, cond, dt, rd, rn, rm); +} + +void Assembler::vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLS{}{}.F32 , , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee100a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLS{}{}.F32 , , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e100a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmls, &Assembler::vnmls, cond, dt, rd, rn, rm); +} + +void Assembler::vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMLS{}{}.F64
, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee100b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMLS{}{}.F64
, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e100b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmls, &Assembler::vnmls, cond, dt, rd, rn, rm); +} + +void Assembler::vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMUL{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xee200a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMUL{}{}.F32 {}, , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e200a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmul, &Assembler::vnmul, cond, dt, rd, rn, rm); +} + +void Assembler::vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VNMUL{}{}.F64 {
}, , ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xee200b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VNMUL{}{}.F64 {
}, , ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e200b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVnmul, &Assembler::vnmul, cond, dt, rd, rn, rm); +} + +void Assembler::vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVorn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VORN{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800010U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VORN{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800010U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VORN{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef300110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VORN{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2300110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVorn, &Assembler::vorn, cond, dt, rd, rn, operand); +} + +void Assembler::vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + ImmediateVorn encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VORN{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VORN{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VORN{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VORN{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2300150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVorn, &Assembler::vorn, cond, dt, rd, rn, operand); +} + +void Assembler::vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VORR{}{}{.
} {
}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VORR{}{}{.
} {
}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2200110U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + ImmediateVorr encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VORR{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800010U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VORR{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800010U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + Delegate(kVorr, &Assembler::vorr, cond, dt, rd, rn, operand); +} + +void Assembler::vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + QRegister rm = operand.GetRegister(); + USE(dt); + if (IsUsingT32()) { + // VORR{}{}{.
} {}, , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VORR{}{}{.
} {}, , ; A1 + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + if (operand.IsImmediate()) { + ImmediateVorr encoded_dt(dt, operand.GetNeonImmediate()); + if (IsUsingT32()) { + // VORR{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | + (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 21)); + AdvanceIT(); + return; + } + } + } else { + // VORR{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && rd.Is(rn)) { + if (cond.Is(al)) { + EmitA32(0xf2800050U | (encoded_dt.GetEncodingValue() << 8) | + rd.Encode(22, 12) | (encoded_dt.GetEncodedImmediate() & 0xf) | + ((encoded_dt.GetEncodedImmediate() & 0x70) << 12) | + ((encoded_dt.GetEncodedImmediate() & 0x80) << 17)); + return; + } + } + } + } + Delegate(kVorr, &Assembler::vorr, cond, dt, rd, rn, operand); +} + +void Assembler::vpadal(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADAL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00600U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADAL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00600U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpadal, &Assembler::vpadal, cond, dt, rd, rm); +} + +void Assembler::vpadal(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADAL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00640U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADAL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00640U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpadal, &Assembler::vpadal, cond, dt, rd, rm); +} + +void Assembler::vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VPADD{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VPADD{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000b10U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADD{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VPADD{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000b10U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpadd, &Assembler::vpadd, cond, dt, rd, rn, rm); +} + +void Assembler::vpaddl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADDL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00200U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADDL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00200U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpaddl, &Assembler::vpaddl, cond, dt, rd, rm); +} + +void Assembler::vpaddl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VPADDL{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00240U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPADDL{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00240U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 5) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpaddl, &Assembler::vpaddl, cond, dt, rd, rm); +} + +void Assembler::vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VPMAX{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VPMAX{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000a00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPMAX{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3000f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VPMAX{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000a00U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpmax, &Assembler::vpmax, cond, dt, rd, rn, rm); +} + +void Assembler::vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VPMIN{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VPMIN{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000a10U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VPMIN{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf3200f00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VPMIN{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000a10U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVpmin, &Assembler::vpmin, cond, dt, rd, rn, rm); +} + +void Assembler::vpop(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VPOP{}{}{.} ; T1 + if (((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xecbd0b00U | dreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VPOP{}{}{.} ; A1 + if (cond.IsNotNever() && + ((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0cbd0b00U | (cond.GetCondition() << 28) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpop, &Assembler::vpop, cond, dt, dreglist); +} + +void Assembler::vpop(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VPOP{}{}{.} ; T2 + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xecbd0a00U | sreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } else { + // VPOP{}{}{.} ; A2 + if (cond.IsNotNever()) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0cbd0a00U | (cond.GetCondition() << 28) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpop, &Assembler::vpop, cond, dt, sreglist); +} + +void Assembler::vpush(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if 
(IsUsingT32()) { + // VPUSH{}{}{.} ; T1 + if (((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xed2d0b00U | dreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VPUSH{}{}{.} ; A1 + if (cond.IsNotNever() && + ((dreglist.GetLength() <= 16) || AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0d2d0b00U | (cond.GetCondition() << 28) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpush, &Assembler::vpush, cond, dt, dreglist); +} + +void Assembler::vpush(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VPUSH{}{}{.} ; T2 + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xed2d0a00U | sreg.Encode(22, 12) | (len & 0xff)); + AdvanceIT(); + return; + } else { + // VPUSH{}{}{.} ; A2 + if (cond.IsNotNever()) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0d2d0a00U | (cond.GetCondition() << 28) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVpush, &Assembler::vpush, cond, dt, sreglist); +} + +void Assembler::vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VQABS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00700U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00700U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqabs, &Assembler::vqabs, cond, dt, rd, rm); +} + +void Assembler::vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VQABS{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00740U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQABS{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00740U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqabs, &Assembler::vqabs, cond, dt, rd, rm); +} + +void Assembler::vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQADD{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000010U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQADD{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000010U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqadd, &Assembler::vqadd, cond, dt, rd, rn, rm); +} + +void Assembler::vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQADD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000050U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQADD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000050U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqadd, &Assembler::vqadd, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLAL{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLAL{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800900U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmlal, &Assembler::vqdmlal, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLAL{}{}.
, , [] ; T2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xef800340U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLAL{}{}.
, , [] ; A2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf2800340U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVqdmlal, &Assembler::vqdmlal, cond, dt, rd, rn, dm, index); +} + +void Assembler::vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLSL{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLSL{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmlsl, &Assembler::vqdmlsl, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMLSL{}{}.
, , [] ; T2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitT32_32(0xef800740U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + AdvanceIT(); + return; + } + } + } else { + // VQDMLSL{}{}.
, , [] ; A2 + if (encoded_dt.IsValid() && + ((dt.Is(S16) && (index <= 3) && (dm.GetCode() <= 7)) || + (!dt.Is(S16) && (index <= 1) && (dm.GetCode() <= 15))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t mvm = dm.GetCode() | index << shift; + EmitA32(0xf2800740U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | (mvm & 0xf) | + ((mvm & 0x10) << 1)); + return; + } + } + } + Delegate(kVqdmlsl, &Assembler::vqdmlsl, cond, dt, rd, rn, dm, index); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
{
}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
{
}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULH{}{}.
{}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULH{}{}.
{}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf3800c40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqdmulh, &Assembler::vqdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULL{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800d00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULL{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800d00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqdmull, &Assembler::vqdmull, cond, dt, rd, rn, rm); +} + +void Assembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQDMULL{}{}.
, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQDMULL{}{}.
, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqdmull, &Assembler::vqdmull, cond, dt, rd, rn, rm); +} + +void Assembler::vqmovn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_op_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQMOVN{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20280U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQMOVN{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20280U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqmovn, &Assembler::vqmovn, cond, dt, rd, rm); +} + +void Assembler::vqmovun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_14 encoded_dt(dt); + if (IsUsingT32()) { + // VQMOVUN{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20240U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQMOVUN{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20240U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqmovun, &Assembler::vqmovun, cond, dt, rd, rm); +} + +void Assembler::vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VQNEG{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQNEG{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqneg, &Assembler::vqneg, cond, dt, rd, rm); +} + +void Assembler::vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_5 encoded_dt(dt); + if (IsUsingT32()) { + // VQNEG{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb007c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQNEG{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b007c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqneg, &Assembler::vqneg, cond, dt, rd, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000b00U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000b40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
{
}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
{
}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf2800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_13 encoded_dt(dt); + if (IsUsingT32()) { + // VQRDMULH{}{}.
{}, , ; T2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRDMULH{}{}.
{}, , ; A2 + if (encoded_dt.IsValid() && + (((dt.GetSize() == 16) && (rm.GetCode() <= 7) && (rm.GetLane() <= 3)) || + ((dt.GetSize() == 32) && (rm.GetCode() <= 15) && + (rm.GetLane() <= 1))) && + (dt.Is(S16) || dt.Is(S32))) { + if (cond.Is(al)) { + EmitA32(0xf3800d40U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.EncodeX(dt, 5, 0)); + return; + } + } + } + Delegate(kVqrdmulh, &Assembler::vqrdmulh, cond, dt, rd, rn, rm); +} + +void Assembler::vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQRSHL{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000510U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHL{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000510U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + Delegate(kVqrshl, &Assembler::vqrshl, cond, dt, rd, rm, rn); +} + +void Assembler::vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQRSHL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000550U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000550U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + Delegate(kVqrshl, &Assembler::vqrshl, cond, dt, rd, rm, rn); +} + +void Assembler::vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_op_size_3 encoded_dt(dt); + Dt_imm6_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQRSHRN{}{}.
, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VQRSHRN{}{}.
, , # ; T1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800950U | + (encoded_dt_2.GetTypeEncodingValue() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHRN{}{}.
, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VQRSHRN{}{}.
, , # ; A1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800950U | (encoded_dt_2.GetTypeEncodingValue() << 24) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqrshrn, &Assembler::vqrshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_2 encoded_dt(dt); + Dt_size_14 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQRSHRUN{}{}.
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xff800850U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VQRSHRUN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQRSHRUN{}{}.
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf3800850U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VQRSHRUN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVqrshrun, &Assembler::vqrshrun, cond, dt, rd, rm, operand); +} + +void Assembler::vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + DRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000410U | + ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000410U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800710U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800710U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshl, &Assembler::vqshl, cond, dt, rd, rm, operand); +} + +void Assembler::vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsRegister()) { + QRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000450U | + ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000450U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHL{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800750U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHL{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800750U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshl, &Assembler::vqshl, cond, dt, rd, rm, operand); +} + +void Assembler::vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_2 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHLU{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800610U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHLU{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800610U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshlu, &Assembler::vqshlu, cond, dt, rd, rm, operand); +} + +void Assembler::vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_2 encoded_dt(dt); + if (IsUsingT32()) { + // VQSHLU{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800650U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHLU{}{}. 
{}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800650U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshlu, &Assembler::vqshlu, cond, dt, rd, rm, operand); +} + +void Assembler::vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_op_size_3 encoded_dt(dt); + Dt_imm6_1 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQSHRN{}{}.
, , #0 ; T1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VQSHRN{}{}.
, , # ; T1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800910U | + (encoded_dt_2.GetTypeEncodingValue() << 28) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VQSHRN{}{}.
, , #0 ; A1 + if (encoded_dt.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20280U | + ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0xc) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VQSHRN{}{}.
, , # ; A1 + if (encoded_dt_2.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800910U | (encoded_dt_2.GetTypeEncodingValue() << 24) | + ((encoded_dt_2.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVqshrn, &Assembler::vqshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_2 encoded_dt(dt); + Dt_size_14 encoded_dt_2(dt); + if (IsUsingT32()) { + // VQSHRUN{}{}.
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xff800810U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VQSHRUN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQSHRUN{}{}.
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf3800810U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VQSHRUN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20240U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVqshrun, &Assembler::vqshrun, cond, dt, rd, rm, operand); +} + +void Assembler::vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSUB{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000210U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQSUB{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000210U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqsub, &Assembler::vqsub, cond, dt, rd, rn, rm); +} + +void Assembler::vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VQSUB{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000250U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VQSUB{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000250U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVqsub, &Assembler::vqsub, cond, dt, rd, rn, rm); +} + +void Assembler::vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VRADDHN{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRADDHN{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al)) { + EmitA32(0xf3800400U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVraddhn, &Assembler::vraddhn, cond, dt, rd, rn, rm); +} + +void Assembler::vrecpe(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRECPE{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb30400U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b30400U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecpe, &Assembler::vrecpe, cond, dt, rd, rm); +} + +void Assembler::vrecpe(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRECPE{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb30440U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b30440U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecpe, &Assembler::vrecpe, cond, dt, rd, rm); +} + +void Assembler::vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRECPS{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPS{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecps, &Assembler::vrecps, cond, dt, rd, rn, rm); +} + +void Assembler::vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRECPS{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRECPS{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2000f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrecps, &Assembler::vrecps, cond, dt, rd, rn, rm); +} + +void Assembler::vrev16(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VREV16{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00100U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV16{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00100U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev16, &Assembler::vrev16, cond, dt, rd, rm); +} + +void Assembler::vrev16(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VREV16{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00140U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV16{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00140U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev16, &Assembler::vrev16, cond, dt, rd, rm); +} + +void Assembler::vrev32(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_15 encoded_dt(dt); + if (IsUsingT32()) { + // VREV32{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00080U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV32{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00080U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev32, &Assembler::vrev32, cond, dt, rd, rm); +} + +void Assembler::vrev32(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_15 encoded_dt(dt); + if (IsUsingT32()) { + // VREV32{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb000c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV32{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b000c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev32, &Assembler::vrev32, cond, dt, rd, rm); +} + +void Assembler::vrev64(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VREV64{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00000U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV64{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00000U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev64, &Assembler::vrev64, cond, dt, rd, rm); +} + +void Assembler::vrev64(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VREV64{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb00040U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VREV64{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b00040U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrev64, &Assembler::vrev64, cond, dt, rd, rm); +} + +void Assembler::vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRHADD{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRHADD{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000100U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrhadd, &Assembler::vrhadd, cond, dt, rd, rn, rm); +} + +void Assembler::vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRHADD{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000140U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRHADD{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000140U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrhadd, &Assembler::vrhadd, cond, dt, rd, rn, rm); +} + +void Assembler::vrinta(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTA{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20500U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTA{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfeb80b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTA{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20500U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTA{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfeb80b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrinta, &Assembler::vrinta, dt, rd, rm); +} + +void Assembler::vrinta(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTA{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20540U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTA{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20540U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrinta, &Assembler::vrinta, dt, rd, rm); +} + +void Assembler::vrinta(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTA{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfeb80a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTA{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfeb80a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrinta, &Assembler::vrinta, dt, rd, rm); +} + +void Assembler::vrintm(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTM{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20680U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTM{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfebb0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTM{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20680U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTM{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfebb0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintm, &Assembler::vrintm, dt, rd, rm); +} + +void Assembler::vrintm(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTM{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb206c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTM{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b206c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintm, &Assembler::vrintm, dt, rd, rm); +} + +void Assembler::vrintm(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTM{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfebb0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTM{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfebb0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintm, &Assembler::vrintm, dt, rd, rm); +} + +void Assembler::vrintn(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTN{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTN{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfeb90b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTN{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20400U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTN{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfeb90b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintn, &Assembler::vrintn, dt, rd, rm); +} + +void Assembler::vrintn(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTN{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTN{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20440U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintn, &Assembler::vrintn, dt, rd, rm); +} + +void Assembler::vrintn(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTN{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfeb90a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTN{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfeb90a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintn, &Assembler::vrintn, dt, rd, rm); +} + +void Assembler::vrintp(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTP{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTP{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xfeba0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTP{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20780U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTP{}.F64
, ; A1 + if (dt.Is(F64)) { + EmitA32(0xfeba0b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintp, &Assembler::vrintp, dt, rd, rm); +} + +void Assembler::vrintp(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTP{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb207c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTP{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b207c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintp, &Assembler::vrintp, dt, rd, rm); +} + +void Assembler::vrintp(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VRINTP{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xfeba0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTP{}.F32 , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfeba0a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintp, &Assembler::vrintp, dt, rd, rm); +} + +void Assembler::vrintr(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTR{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb60a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTR{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb60a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintr, &Assembler::vrintr, cond, dt, rd, rm); +} + +void Assembler::vrintr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTR{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb60b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTR{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb60b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintr, &Assembler::vrintr, cond, dt, rd, rm); +} + +void Assembler::vrintx(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTX{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTX{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb70b40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTX{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20480U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTX{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb70b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintx, &Assembler::vrintx, cond, dt, rd, rm); +} + +void Assembler::vrintx(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTX{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb204c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTX{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b204c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintx, &Assembler::vrintx, dt, rd, rm); +} + +void Assembler::vrintx(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTX{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb70a40U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTX{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb70a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintx, &Assembler::vrintx, cond, dt, rd, rm); +} + +void Assembler::vrintz(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTZ{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb20580U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VRINTZ{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb60bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTZ{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b20580U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + // VRINTZ{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb60bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintz, &Assembler::vrintz, cond, dt, rd, rm); +} + +void Assembler::vrintz(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + Dt_size_16 encoded_dt(dt); + if (IsUsingT32()) { + // VRINTZ{}.
, ; T1 + if (encoded_dt.IsValid()) { + EmitT32_32(0xffb205c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTZ{}.
, ; A1 + if (encoded_dt.IsValid()) { + EmitA32(0xf3b205c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintz, &Assembler::vrintz, dt, rd, rm); +} + +void Assembler::vrintz(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRINTZ{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb60ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VRINTZ{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb60ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVrintz, &Assembler::vrintz, cond, dt, rd, rm); +} + +void Assembler::vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VRSHL{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VRSHL{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000500U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + Delegate(kVrshl, &Assembler::vrshl, cond, dt, rd, rm, rn); +} + +void Assembler::vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VRSHL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000540U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VRSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000540U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + Delegate(kVrshl, &Assembler::vrshl, cond, dt, rd, rm, rn); +} + +void Assembler::vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSHR{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800210U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VRSHR{}{}.
, , #0 ; T1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSHR{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800210U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VRSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVrshr, &Assembler::vrshr, cond, dt, rd, rm, operand); +} + +void Assembler::vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSHR{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800250U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VRSHR{}{}.
, , #0 ; T1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSHR{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800250U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VRSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVrshr, &Assembler::vrshr, cond, dt, rd, rm, operand); +} + +void Assembler::vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_3 encoded_dt(dt); + Dt_size_3 encoded_dt_2(dt); + if (IsUsingT32()) { + // VRSHRN{}{}.I
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800850U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VRSHRN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSHRN{}{}.I
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800850U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VRSHRN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVrshrn, &Assembler::vrshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vrsqrte(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRSQRTE{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb30480U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b30480U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrte, &Assembler::vrsqrte, cond, dt, rd, rm); +} + +void Assembler::vrsqrte(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_F_size_4 encoded_dt(dt); + if (IsUsingT32()) { + // VRSQRTE{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb304c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTE{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b304c0U | ((encoded_dt.GetEncodingValue() & 0x3) << 18) | + ((encoded_dt.GetEncodingValue() & 0x4) << 6) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrte, &Assembler::vrsqrte, cond, dt, rd, rm); +} + +void Assembler::vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRSQRTS{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTS{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f10U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrts, &Assembler::vrsqrts, cond, dt, rd, rn, rm); +} + +void Assembler::vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VRSQRTS{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSQRTS{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200f50U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsqrts, &Assembler::vrsqrts, cond, dt, rd, rn, rm); +} + +void Assembler::vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSRA{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800310U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VRSRA{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800310U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVrsra, &Assembler::vrsra, cond, dt, rd, rm, operand); +} + +void Assembler::vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VRSRA{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800350U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VRSRA{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800350U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVrsra, &Assembler::vrsra, cond, dt, rd, rm, operand); +} + +void Assembler::vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VRSUBHN{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff800600U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VRSUBHN{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al)) { + EmitA32(0xf3800600U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVrsubhn, &Assembler::vrsubhn, cond, dt, rd, rn, rm); +} + +void Assembler::vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELEQ.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe000b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELEQ.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe000b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVseleq, &Assembler::vseleq, dt, rd, rn, rm); +} + +void Assembler::vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELEQ.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe000a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELEQ.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe000a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVseleq, &Assembler::vseleq, dt, rd, rn, rm); +} + +void Assembler::vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGE.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe200b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGE.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe200b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselge, &Assembler::vselge, dt, rd, rn, rm); +} + +void Assembler::vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGE.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe200a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGE.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe200a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselge, &Assembler::vselge, dt, rd, rn, rm); +} + +void Assembler::vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGT.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe300b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGT.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe300b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselgt, &Assembler::vselgt, dt, rd, rn, rm); +} + +void Assembler::vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELGT.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe300a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELGT.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe300a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselgt, &Assembler::vselgt, dt, rd, rn, rm); +} + +void Assembler::vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELVS.F64
, , ; T1 + if (OutsideITBlock() && dt.Is(F64)) { + EmitT32_32(0xfe100b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELVS.F64
, , ; A1 + if (dt.Is(F64)) { + EmitA32(0xfe100b00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselvs, &Assembler::vselvs, dt, rd, rn, rm); +} + +void Assembler::vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(al); + if (IsUsingT32()) { + // VSELVS.F32 , , ; T1 + if (OutsideITBlock() && dt.Is(F32)) { + EmitT32_32(0xfe100a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSELVS.F32 , , ; A1 + if (dt.Is(F32)) { + EmitA32(0xfe100a00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVselvs, &Assembler::vselvs, dt, rd, rn, rm); +} + +void Assembler::vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.I {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800510U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSHL{}{}.I {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800510U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + if (operand.IsRegister()) { + DRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000400U | + ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VSHL{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000400U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + Delegate(kVshl, &Assembler::vshl, cond, dt, rd, rm, operand); +} + +void Assembler::vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.I {}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xef800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSHL{}{}.I {}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf2800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + if (operand.IsRegister()) { + QRegister rn = operand.GetRegister(); + Dt_U_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSHL{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000440U | + ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + AdvanceIT(); + return; + } + } + } else { + // VSHL{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000440U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rm.Encode(5, 0) | rn.Encode(7, 16)); + return; + } + } + } + } + Delegate(kVshl, &Assembler::vshl, cond, dt, rd, rm, operand); +} + +void Assembler::vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_4 encoded_dt(dt); + Dt_size_17 encoded_dt_2(dt); + if (IsUsingT32()) { + // VSHLL{}{}. , , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() + imm; + EmitT32_32(0xef800a10U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHLL{}{}. , , # ; T2 + if (encoded_dt_2.IsValid() && (imm == dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20300U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHLL{}{}. , , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() + imm; + EmitA32(0xf2800a10U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHLL{}{}. 
, , # ; A2 + if (encoded_dt_2.IsValid() && (imm == dt.GetSize())) { + if (cond.Is(al)) { + EmitA32(0xf3b20300U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshll, &Assembler::vshll, cond, dt, rd, rm, operand); +} + +void Assembler::vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSHR{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800010U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHR{}{}.
, , #0 ; T1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHR{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800010U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200110U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshr, &Assembler::vshr, cond, dt, rd, rm, operand); +} + +void Assembler::vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSHR{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800050U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHR{}{}.
, , #0 ; T1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHR{}{}. {}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800050U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHR{}{}.
, , #0 ; A1 + if ((dt.Is(kDataTypeS) || dt.Is(kDataTypeU)) && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf2200150U | rd.Encode(22, 12) | rm.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshr, &Assembler::vshr, cond, dt, rd, rm, operand); +} + +void Assembler::vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_imm6_3 encoded_dt(dt); + Dt_size_3 encoded_dt_2(dt); + if (IsUsingT32()) { + // VSHRN{}{}.I
, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitT32_32(0xef800810U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + // VSHRN{}{}.
, , #0 ; T1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSHRN{}{}.I
, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize() / 2)) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() / 2 - imm; + EmitA32(0xf2800810U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + // VSHRN{}{}.
, , #0 ; A1 + if (encoded_dt_2.IsValid() && (imm == 0)) { + if (cond.Is(al)) { + EmitA32(0xf3b20200U | (encoded_dt_2.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + } + } + Delegate(kVshrn, &Assembler::vshrn, cond, dt, rd, rm, operand); +} + +void Assembler::vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSLI{}{}.
{
}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xff800510U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSLI{}{}.
{
}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf3800510U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsli, &Assembler::vsli, cond, dt, rd, rm, operand); +} + +void Assembler::vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSLI{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = imm; + EmitT32_32(0xff800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSLI{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && (imm <= dt.GetSize() - 1)) { + if (cond.Is(al)) { + uint32_t imm6 = imm; + EmitA32(0xf3800550U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsli, &Assembler::vsli, cond, dt, rd, rm, operand); +} + +void Assembler::vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VSQRT{}{}.F32 , ; T1 + if (dt.Is(F32)) { + EmitT32_32(0xeeb10ac0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSQRT{}{}.F32 , ; A1 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0eb10ac0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVsqrt, &Assembler::vsqrt, cond, dt, rd, rm); +} + +void Assembler::vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VSQRT{}{}.F64
, ; T1 + if (dt.Is(F64)) { + EmitT32_32(0xeeb10bc0U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSQRT{}{}.F64
, ; A1 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0eb10bc0U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rm.Encode(5, 0)); + return; + } + } + Delegate(kVsqrt, &Assembler::vsqrt, cond, dt, rd, rm); +} + +void Assembler::vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSRA{}{}. {
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800110U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRA{}{}. {
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800110U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsra, &Assembler::vsra, cond, dt, rd, rm, operand); +} + +void Assembler::vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSRA{}{}. {}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xef800150U | (encoded_dt.GetTypeEncodingValue() << 28) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRA{}{}. 
{}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf2800150U | (encoded_dt.GetTypeEncodingValue() << 24) | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsra, &Assembler::vsra, cond, dt, rd, rm, operand); +} + +void Assembler::vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSRI{}{}.
{
}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xff800410U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRI{}{}.
{
}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf3800410U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsri, &Assembler::vsri, cond, dt, rd, rm, operand); +} + +void Assembler::vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + if (operand.GetNeonImmediate().CanConvert()) { + uint32_t imm = operand.GetNeonImmediate().GetImmediate(); + Dt_L_imm6_4 encoded_dt(dt); + if (IsUsingT32()) { + // VSRI{}{}.
{}, , # ; T1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + uint32_t imm6 = dt.GetSize() - imm; + EmitT32_32(0xff800450U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + AdvanceIT(); + return; + } + } + } else { + // VSRI{}{}.
{}, , # ; A1 + if (encoded_dt.IsValid() && (imm >= 1) && (imm <= dt.GetSize())) { + if (cond.Is(al)) { + uint32_t imm6 = dt.GetSize() - imm; + EmitA32(0xf3800450U | + ((encoded_dt.GetEncodingValue() & 0x7) << 19) | + ((encoded_dt.GetEncodingValue() & 0x8) << 4) | + rd.Encode(22, 12) | rm.Encode(5, 0) | (imm6 << 16)); + return; + } + } + } + } + } + Delegate(kVsri, &Assembler::vsri, cond, dt, rd, rm, operand); +} + +void Assembler::vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_6 encoded_dt(dt); + Dt_size_7 encoded_dt_2(dt); + Align_align_5 encoded_align_1(align, nreglist); + Align_index_align_1 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST1{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}] ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsOffset() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980000fU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}]! ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsPostIndex() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980000dU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST1{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST1{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST1{}{}.
, [{:}] ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsOffset() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480000fU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST1{}{}.
, [{:}]! ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && operand.IsPostIndex() && + encoded_align_2.IsValid() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480000dU | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_6 encoded_dt(dt); + Dt_size_7 encoded_dt_2(dt); + Align_align_5 encoded_align_1(align, nreglist); + Align_index_align_1 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST1{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VST1{}{}.
, [{:}], ; T1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800000U | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST1{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + switch (nreglist.GetLength()) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 1: + len_encoding = 0x7; + break; + case 2: + len_encoding = 0xa; + break; + case 3: + len_encoding = 0x6; + break; + case 4: + len_encoding = 0x2; + break; + } + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VST1{}{}.
, [{:}], ; A1 + if (encoded_dt_2.IsValid() && nreglist.IsTransferOneLane() && + (nreglist.GetLength() == 1) && !rm.IsPC() && !rm.IsSP() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800000U | (encoded_dt_2.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst1, &Assembler::vst1, cond, dt, nreglist, operand); +} + +void Assembler::vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_2 encoded_align_1(align, nreglist); + Align_index_align_2 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST2{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480010fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST2{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480010dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_2 encoded_align_1(align, nreglist); + Align_index_align_2 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VST2{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding; + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x8; + } + if (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2)) { + len_encoding = 0x9; + } + if (nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) { + len_encoding = 0x3; + } + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VST2{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 2)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 2))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800100U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst2, &Assembler::vst2, cond, dt, nreglist, operand); +} + +void Assembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VST3{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST3{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST3{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_3 encoded_align_1(align); + if (IsUsingT32()) { + // VST3{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x4 : 0x5; + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst3, &Assembler::vst3, cond, dt, nreglist, operand); +} + +void Assembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VST3{}{}.
, [] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST3{}{}.
, []! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsOffset() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480020fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST3{}{}.
, []! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + operand.IsPostIndex() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480020dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Sign sign = operand.GetSign(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Index_1 encoded_align_1(nreglist, dt); + if (IsUsingT32()) { + // VST3{}{}.
, [], # ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST3{}{}.
, [], # ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 3)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 3))) && + sign.IsPlus() && operand.IsPostIndex() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800200U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst3, &Assembler::vst3, cond, dt, nreglist, operand); +} + +void Assembler::vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediateZero()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Dt_size_7 encoded_dt(dt); + Align_align_4 encoded_align_1(align); + Align_index_align_3 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST4{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf900000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf900000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}] ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}]! ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf980030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + AdvanceIT(); + return; + } + } + } else { + // VST4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf400000fU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_1.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf400000dU | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16)); + return; + } + } + // VST4{}{}.
, [{:}] ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsOffset() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480030fU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + // VST4{}{}.
, [{:}]! ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + operand.IsPostIndex() && encoded_align_2.IsValid() && + (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf480030dU | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16)); + return; + } + } + } + } + if (operand.IsPlainRegister()) { + Register rn = operand.GetBaseRegister(); + Alignment align = operand.GetAlignment(); + Register rm = operand.GetOffsetRegister(); + Dt_size_7 encoded_dt(dt); + Align_align_4 encoded_align_1(align); + Align_index_align_3 encoded_align_2(align, nreglist, dt); + if (IsUsingT32()) { + // VST4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitT32_32(0xf9000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + AdvanceIT(); + return; + } + } + // VST4{}{}.
, [{:}], ; T1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitT32_32(0xf9800300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | + rm.GetCode()); + AdvanceIT(); + return; + } + } + } else { + // VST4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferMultipleLanes() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.IsSingleSpaced() ? 0x0 : 0x1; + EmitA32(0xf4000000U | (encoded_dt.GetEncodingValue() << 6) | + (encoded_align_1.GetEncodingValue() << 4) | + first.Encode(22, 12) | (len_encoding << 8) | + (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + // VST4{}{}.
, [{:}], ; A1 + if (encoded_dt.IsValid() && nreglist.IsTransferOneLane() && + ((nreglist.IsSingleSpaced() && (nreglist.GetLength() == 4)) || + (nreglist.IsDoubleSpaced() && (nreglist.GetLength() == 4))) && + !rm.IsPC() && !rm.IsSP() && (!rn.IsPC() || AllowUnpredictable())) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + EmitA32(0xf4800300U | (encoded_dt.GetEncodingValue() << 10) | + (encoded_align_2.GetEncodingValue() << 4) | + first.Encode(22, 12) | (rn.GetCode() << 16) | rm.GetCode()); + return; + } + } + } + } + Delegate(kVst4, &Assembler::vst4, cond, dt, nreglist, operand); +} + +void Assembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTM{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec800b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTM{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c800b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstm, &Assembler::vstm, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTM{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || 
AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec800a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTM{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c800a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstm, &Assembler::vstm, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTMDB{}{}{.} !, ; T1 + if (write_back.DoesWriteBack() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xed200b00U | (rn.GetCode() << 16) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMDB{}{}{.} !, ; A1 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0d200b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + dreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVstmdb, &Assembler::vstmdb, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + 
USE(dt); + if (IsUsingT32()) { + // VSTMDB{}{}{.} !, ; T2 + if (write_back.DoesWriteBack() && (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xed200a00U | (rn.GetCode() << 16) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMDB{}{}{.} !, ; A2 + if (write_back.DoesWriteBack() && cond.IsNotNever() && + (!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0d200a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + sreg.Encode(22, 12) | (len & 0xff)); + return; + } + } + Delegate(kVstmdb, &Assembler::vstmdb, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTMIA{}{}{.} {!}, ; T1 + if ((((dreglist.GetLength() <= 16) && !rn.IsPC()) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitT32_32(0xec800b00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMIA{}{}{.} {!}, ; A1 + if (cond.IsNotNever() && (((dreglist.GetLength() <= 16) && + (!rn.IsPC() || !write_back.DoesWriteBack())) || + AllowUnpredictable())) { + const DRegister& dreg = dreglist.GetFirstDRegister(); + unsigned len = dreglist.GetLength() * 2; + EmitA32(0x0c800b00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | dreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstmia, &Assembler::vstmia, cond, dt, rn, write_back, dreglist); +} + +void Assembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack 
write_back, + SRegisterList sreglist) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSTMIA{}{}{.} {!}, ; T2 + if ((!rn.IsPC() || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitT32_32(0xec800a00U | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + AdvanceIT(); + return; + } + } else { + // VSTMIA{}{}{.} {!}, ; A2 + if (cond.IsNotNever() && + ((!rn.IsPC() || !write_back.DoesWriteBack()) || AllowUnpredictable())) { + const SRegister& sreg = sreglist.GetFirstSRegister(); + unsigned len = sreglist.GetLength(); + EmitA32(0x0c800a00U | (cond.GetCondition() << 28) | (rn.GetCode() << 16) | + (write_back.GetWriteBackUint32() << 21) | sreg.Encode(22, 12) | + (len & 0xff)); + return; + } + } + Delegate(kVstmia, &Assembler::vstmia, cond, dt, rn, write_back, sreglist); +} + +void Assembler::vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + Register rn = operand.GetBaseRegister(); + int32_t offset = operand.GetOffsetImmediate(); + if (IsUsingT32()) { + // VSTR{}{}{.64}
, [{, #{+/-}}] ; T1 + if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && operand.IsOffset() && + (!rn.IsPC() || AllowUnpredictable())) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitT32_32(0xed000b00U | rd.Encode(22, 12) | (rn.GetCode() << 16) | + offset_ | (sign << 23)); + AdvanceIT(); + return; + } + } else { + // VSTR{}{}{.64}
, [{, #{+/-}}] ; A1 + if (dt.IsNoneOr(Untyped64) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && operand.IsOffset() && cond.IsNotNever()) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitA32(0x0d000b00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + (rn.GetCode() << 16) | offset_ | (sign << 23)); + return; + } + } + } + Delegate(kVstr, &Assembler::vstr, cond, dt, rd, operand); +} + +void Assembler::vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (operand.IsImmediate()) { + Register rn = operand.GetBaseRegister(); + int32_t offset = operand.GetOffsetImmediate(); + if (IsUsingT32()) { + // VSTR{}{}{.32} , [{, #{+/-}}] ; T2 + if (dt.IsNoneOr(Untyped32) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && operand.IsOffset() && + (!rn.IsPC() || AllowUnpredictable())) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitT32_32(0xed000a00U | rd.Encode(22, 12) | (rn.GetCode() << 16) | + offset_ | (sign << 23)); + AdvanceIT(); + return; + } + } else { + // VSTR{}{}{.32} , [{, #{+/-}}] ; A2 + if (dt.IsNoneOr(Untyped32) && (offset >= -1020) && (offset <= 1020) && + ((offset % 4) == 0) && operand.IsOffset() && cond.IsNotNever()) { + uint32_t sign = operand.GetSign().IsPlus() ? 1 : 0; + uint32_t offset_ = abs(offset) >> 2; + EmitA32(0x0d000a00U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + (rn.GetCode() << 16) | offset_ | (sign << 23)); + return; + } + } + } + Delegate(kVstr, &Assembler::vstr, cond, dt, rd, operand); +} + +void Assembler::vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VSUB{}{}.F32 {
}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VSUB{}{}.F64 {
}, , ; T2 + if (dt.Is(F64)) { + EmitT32_32(0xee300b40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + // VSUB{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000800U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSUB{}{}.F32 {
}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200d00U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VSUB{}{}.F64 {
}, , ; A2 + if (dt.Is(F64) && cond.IsNotNever()) { + EmitA32(0x0e300b40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + // VSUB{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000800U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVsub, &Assembler::vsub, cond, dt, rd, rn, rm); +} + +void Assembler::vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_2 encoded_dt(dt); + if (IsUsingT32()) { + // VSUB{}{}.F32 {}, , ; T1 + if (dt.Is(F32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VSUB{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xff000840U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSUB{}{}.F32 {}, , ; A1 + if (dt.Is(F32)) { + if (cond.Is(al)) { + EmitA32(0xf2200d40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + return; + } + } + // VSUB{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3000840U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVsub, &Assembler::vsub, cond, dt, rd, rn, rm); +} + +void Assembler::vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VSUB{}{}.F32 {}, , ; T2 + if (dt.Is(F32)) { + EmitT32_32(0xee300a40U | rd.Encode(22, 12) | rn.Encode(7, 16) | + rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSUB{}{}.F32 {}, , ; A2 + if (dt.Is(F32) && cond.IsNotNever()) { + EmitA32(0x0e300a40U | (cond.GetCondition() << 28) | rd.Encode(22, 12) | + rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVsub, &Assembler::vsub, cond, dt, rd, rn, rm); +} + +void Assembler::vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_3 encoded_dt(dt); + if (IsUsingT32()) { + // VSUBHN{}{}.
, , ; T1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800600U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSUBHN{}{}.
, , ; A1 + if (encoded_dt.IsValid() && (dt.Is(I16) || dt.Is(I32) || dt.Is(I64))) { + if (cond.Is(al)) { + EmitA32(0xf2800600U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVsubhn, &Assembler::vsubhn, cond, dt, rd, rn, rm); +} + +void Assembler::vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSUBL{}{}.
, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSUBL{}{}.
, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800200U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVsubl, &Assembler::vsubl, cond, dt, rd, rn, rm); +} + +void Assembler::vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_U_size_1 encoded_dt(dt); + if (IsUsingT32()) { + // VSUBW{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef800300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 26) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VSUBW{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2800300U | ((encoded_dt.GetEncodingValue() & 0x3) << 20) | + ((encoded_dt.GetEncodingValue() & 0x4) << 22) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVsubw, &Assembler::vsubw, cond, dt, rd, rn, rm); +} + +void Assembler::vswp(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSWP{}{}{.
}
, ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20000U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSWP{}{}{.
}
, ; A1 + if (cond.Is(al)) { + EmitA32(0xf3b20000U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVswp, &Assembler::vswp, cond, dt, rd, rm); +} + +void Assembler::vswp(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + USE(dt); + if (IsUsingT32()) { + // VSWP{}{}{.
} , ; T1 + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20040U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } else { + // VSWP{}{}{.
} , ; A1 + if (cond.Is(al)) { + EmitA32(0xf3b20040U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + Delegate(kVswp, &Assembler::vswp, cond, dt, rd, rm); +} + +void Assembler::vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VTBL{}{}.8
, , ; T1 + if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitT32_32(0xffb00800U | rd.Encode(22, 12) | first.Encode(7, 16) | + (len_encoding << 8) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VTBL{}{}.8
, , ; A1 + if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitA32(0xf3b00800U | rd.Encode(22, 12) | first.Encode(7, 16) | + (len_encoding << 8) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVtbl, &Assembler::vtbl, cond, dt, rd, nreglist, rm); +} + +void Assembler::vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // VTBX{}{}.8
, , ; T1 + if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitT32_32(0xffb00840U | rd.Encode(22, 12) | first.Encode(7, 16) | + (len_encoding << 8) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VTBX{}{}.8
, , ; A1 + if (dt.Is(Untyped8) && nreglist.IsTransferMultipleLanes() && + (nreglist.IsSingleSpaced()) && (nreglist.GetLength() <= 4)) { + if (cond.Is(al)) { + const DRegister& first = nreglist.GetFirstDRegister(); + uint32_t len_encoding = nreglist.GetLength() - 1; + EmitA32(0xf3b00840U | rd.Encode(22, 12) | first.Encode(7, 16) | + (len_encoding << 8) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVtbx, &Assembler::vtbx, cond, dt, rd, nreglist, rm); +} + +void Assembler::vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VTRN{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20080U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VTRN{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20080U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVtrn, &Assembler::vtrn, cond, dt, rd, rm); +} + +void Assembler::vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VTRN{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb200c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VTRN{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b200c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVtrn, &Assembler::vtrn, cond, dt, rd, rm); +} + +void Assembler::vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VTST{}{}.
{
}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000810U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VTST{}{}.
{
}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000810U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVtst, &Assembler::vtst, cond, dt, rd, rn, rm); +} + +void Assembler::vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VTST{}{}.
{}, , ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xef000850U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VTST{}{}.
{}, , ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf2000850U | (encoded_dt.GetEncodingValue() << 20) | + rd.Encode(22, 12) | rn.Encode(7, 16) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVtst, &Assembler::vtst, cond, dt, rd, rn, rm); +} + +void Assembler::vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_15 encoded_dt(dt); + if (IsUsingT32()) { + // VUZP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20100U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VUZP{}{}.32
, ; T1 + if (dt.Is(Untyped32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffba0080U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VUZP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20100U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VUZP{}{}.32
, ; A1 + if (dt.Is(Untyped32)) { + if (cond.Is(al)) { + EmitA32(0xf3ba0080U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVuzp, &Assembler::vuzp, cond, dt, rd, rm); +} + +void Assembler::vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VUZP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20140U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VUZP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20140U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVuzp, &Assembler::vuzp, cond, dt, rd, rm); +} + +void Assembler::vzip(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_15 encoded_dt(dt); + if (IsUsingT32()) { + // VZIP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb20180U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + // VZIP{}{}.32
, ; T1 + if (dt.Is(Untyped32)) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffba0080U | rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VZIP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b20180U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + // VZIP{}{}.32
, ; A1 + if (dt.Is(Untyped32)) { + if (cond.Is(al)) { + EmitA32(0xf3ba0080U | rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVzip, &Assembler::vzip, cond, dt, rd, rm); +} + +void Assembler::vzip(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + Dt_size_7 encoded_dt(dt); + if (IsUsingT32()) { + // VZIP{}{}.
, ; T1 + if (encoded_dt.IsValid()) { + if (cond.Is(al) || AllowStronglyDiscouraged()) { + EmitT32_32(0xffb201c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + AdvanceIT(); + return; + } + } + } else { + // VZIP{}{}.
, ; A1 + if (encoded_dt.IsValid()) { + if (cond.Is(al)) { + EmitA32(0xf3b201c0U | (encoded_dt.GetEncodingValue() << 18) | + rd.Encode(22, 12) | rm.Encode(5, 0)); + return; + } + } + } + Delegate(kVzip, &Assembler::vzip, cond, dt, rd, rm); +} + +void Assembler::yield(Condition cond, EncodingSize size) { + VIXL_ASSERT(AllowAssembler()); + CheckIT(cond); + if (IsUsingT32()) { + // YIELD{}{} ; T1 + if (!size.IsWide()) { + EmitT32_16(0xbf10); + AdvanceIT(); + return; + } + // YIELD{}.W ; T2 + if (!size.IsNarrow()) { + EmitT32_32(0xf3af8001U); + AdvanceIT(); + return; + } + } else { + // YIELD{}{} ; A1 + if (cond.IsNotNever()) { + EmitA32(0x0320f001U | (cond.GetCondition() << 28)); + return; + } + } + Delegate(kYield, &Assembler::yield, cond, size); +} +// End of generated code. + +} // namespace aarch32 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.h new file mode 100644 index 00000000..bb7df840 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/assembler-aarch32.h @@ -0,0 +1,6159 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH32_ASSEMBLER_AARCH32_H_ +#define VIXL_AARCH32_ASSEMBLER_AARCH32_H_ + +#include "assembler-base-vixl.h" + +#include "aarch32/instructions-aarch32.h" +#include "aarch32/location-aarch32.h" + +namespace vixl { +namespace aarch32 { + +class Assembler : public internal::AssemblerBase { + InstructionSet isa_; + Condition first_condition_; + uint16_t it_mask_; + bool has_32_dregs_; + bool allow_unpredictable_; + bool allow_strongly_discouraged_; + + protected: + void EmitT32_16(uint16_t instr); + void EmitT32_32(uint32_t instr); + void EmitA32(uint32_t instr); + // Check that the condition of the current instruction is consistent with the + // IT state. + void CheckIT(Condition condition) { +#ifdef VIXL_DEBUG + PerformCheckIT(condition); +#else + USE(condition); +#endif + } +#ifdef VIXL_DEBUG + void PerformCheckIT(Condition condition); +#endif + void AdvanceIT() { + first_condition_ = + Condition((first_condition_.GetCondition() & 0xe) | (it_mask_ >> 3)); + it_mask_ = (it_mask_ << 1) & 0xf; + } + // Virtual, in order to be overridden by the MacroAssembler, which needs to + // notify the pool manager. 
+ virtual void BindHelper(Label* label); + + uint32_t Link(uint32_t instr, + Location* location, + const Location::EmitOperator& op, + const ReferenceInfo* info); + + public: + class AllowUnpredictableScope { + Assembler* assembler_; + bool old_; + + public: + explicit AllowUnpredictableScope(Assembler* assembler) + : assembler_(assembler), old_(assembler->allow_unpredictable_) { + assembler_->allow_unpredictable_ = true; + } + ~AllowUnpredictableScope() { assembler_->allow_unpredictable_ = old_; } + }; + class AllowStronglyDiscouragedScope { + Assembler* assembler_; + bool old_; + + public: + explicit AllowStronglyDiscouragedScope(Assembler* assembler) + : assembler_(assembler), old_(assembler->allow_strongly_discouraged_) { + assembler_->allow_strongly_discouraged_ = true; + } + ~AllowStronglyDiscouragedScope() { + assembler_->allow_strongly_discouraged_ = old_; + } + }; + + explicit Assembler(InstructionSet isa = kDefaultISA) + : isa_(isa), + first_condition_(al), + it_mask_(0), + has_32_dregs_(true), + allow_unpredictable_(false), + allow_strongly_discouraged_(false) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + // Avoid compiler warning. 
+ USE(isa_); + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + USE(isa_); + VIXL_ASSERT(isa == T32); +#endif + } + explicit Assembler(size_t capacity, InstructionSet isa = kDefaultISA) + : AssemblerBase(capacity), + isa_(isa), + first_condition_(al), + it_mask_(0), + has_32_dregs_(true), + allow_unpredictable_(false), + allow_strongly_discouraged_(false) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + VIXL_ASSERT(isa == T32); +#endif + } + Assembler(byte* buffer, size_t capacity, InstructionSet isa = kDefaultISA) + : AssemblerBase(buffer, capacity), + isa_(isa), + first_condition_(al), + it_mask_(0), + has_32_dregs_(true), + allow_unpredictable_(false), + allow_strongly_discouraged_(false) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + VIXL_ASSERT(isa == T32); +#endif + } + virtual ~Assembler() {} + + void UseInstructionSet(InstructionSet isa) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + USE(isa); + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + USE(isa); + VIXL_ASSERT(isa == T32); +#else + VIXL_ASSERT((isa_ == isa) || (GetCursorOffset() == 0)); + isa_ = isa; +#endif + } + +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + InstructionSet GetInstructionSetInUse() const { return A32; } +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + InstructionSet GetInstructionSetInUse() const { return T32; } +#else + InstructionSet GetInstructionSetInUse() const { return isa_; } +#endif + + void UseT32() { UseInstructionSet(T32); } + void UseA32() { UseInstructionSet(A32); } + bool IsUsingT32() const { return GetInstructionSetInUse() == T32; } + bool IsUsingA32() const { return GetInstructionSetInUse() == A32; } + + void SetIT(Condition first_condition, uint16_t it_mask) { + VIXL_ASSERT(it_mask_ == 0); + first_condition_ = first_condition; + it_mask_ = it_mask; + } + bool InITBlock() { return 
it_mask_ != 0; } + bool OutsideITBlock() { return it_mask_ == 0; } + bool OutsideITBlockOrLast() { return (it_mask_ == 0) || (it_mask_ == 0x8); } + bool OutsideITBlockAndAlOrLast(Condition cond) { + return ((it_mask_ == 0) && cond.Is(al)) || (it_mask_ == 0x8); + } + void CheckNotIT() { VIXL_ASSERT(it_mask_ == 0); } + bool Has32DRegs() const { return has_32_dregs_; } + void SetHas32DRegs(bool has_32_dregs) { has_32_dregs_ = has_32_dregs; } + + int32_t GetCursorOffset() const { + ptrdiff_t offset = buffer_.GetCursorOffset(); + VIXL_ASSERT(IsInt32(offset)); + return static_cast(offset); + } + + uint32_t GetArchitectureStatePCOffset() const { return IsUsingT32() ? 4 : 8; } + + // Bind a raw Location that will never be tracked by the pool manager. + void bind(Location* location) { + VIXL_ASSERT(AllowAssembler()); + VIXL_ASSERT(!location->IsBound()); + location->SetLocation(this, GetCursorOffset()); + location->MarkBound(); + } + + // Bind a Label, which may be tracked by the pool manager in the presence of a + // MacroAssembler. + void bind(Label* label) { + VIXL_ASSERT(AllowAssembler()); + BindHelper(label); + } + + void place(RawLiteral* literal) { + VIXL_ASSERT(AllowAssembler()); + VIXL_ASSERT(literal->IsManuallyPlaced()); + literal->SetLocation(this, GetCursorOffset()); + literal->MarkBound(); + GetBuffer()->EnsureSpaceFor(literal->GetSize()); + GetBuffer()->EmitData(literal->GetDataAddress(), literal->GetSize()); + } + + size_t GetSizeOfCodeGeneratedSince(Label* label) const { + VIXL_ASSERT(label->IsBound()); + return buffer_.GetOffsetFrom(label->GetLocation()); + } + + // Helpers for it instruction. 
+ void it(Condition cond) { it(cond, 0x8); } + void itt(Condition cond) { it(cond, 0x4); } + void ite(Condition cond) { it(cond, 0xc); } + void ittt(Condition cond) { it(cond, 0x2); } + void itet(Condition cond) { it(cond, 0xa); } + void itte(Condition cond) { it(cond, 0x6); } + void itee(Condition cond) { it(cond, 0xe); } + void itttt(Condition cond) { it(cond, 0x1); } + void itett(Condition cond) { it(cond, 0x9); } + void ittet(Condition cond) { it(cond, 0x5); } + void iteet(Condition cond) { it(cond, 0xd); } + void ittte(Condition cond) { it(cond, 0x3); } + void itete(Condition cond) { it(cond, 0xb); } + void ittee(Condition cond) { it(cond, 0x7); } + void iteee(Condition cond) { it(cond, 0xf); } + + // Start of generated code. + typedef void (Assembler::*InstructionCondSizeRROp)(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + typedef void (Assembler::*InstructionCondROp)(Condition cond, + Register rd, + const Operand& operand); + typedef void (Assembler::*InstructionROp)(Register rd, + const Operand& operand); + typedef void (Assembler::*InstructionCondRROp)(Condition cond, + Register rd, + Register rn, + const Operand& operand); + typedef void (Assembler::*InstructionCondSizeRL)(Condition cond, + EncodingSize size, + Register rd, + Location* location); + typedef void (Assembler::*InstructionDtQQ)(DataType dt, + QRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionCondSizeL)(Condition cond, + EncodingSize size, + Location* location); + typedef void (Assembler::*InstructionCondRII)(Condition cond, + Register rd, + uint32_t lsb, + uint32_t width); + typedef void (Assembler::*InstructionCondRRII)( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + typedef void (Assembler::*InstructionCondI)(Condition cond, uint32_t imm); + typedef void (Assembler::*InstructionCondL)(Condition cond, + Location* location); + typedef void (Assembler::*InstructionCondR)(Condition cond, Register 
rm); + typedef void (Assembler::*InstructionRL)(Register rn, Location* location); + typedef void (Assembler::*InstructionCond)(Condition cond); + typedef void (Assembler::*InstructionCondRR)(Condition cond, + Register rd, + Register rm); + typedef void (Assembler::*InstructionCondSizeROp)(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + typedef void (Assembler::*InstructionCondRRR)(Condition cond, + Register rd, + Register rn, + Register rm); + typedef void (Assembler::*InstructionCondBa)(Condition cond, + MemoryBarrier option); + typedef void (Assembler::*InstructionCondRwbDrl)(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + typedef void (Assembler::*InstructionCondRMop)(Condition cond, + Register rt, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondRRMop)(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondSizeRwbRl)(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + typedef void (Assembler::*InstructionCondRwbRl)(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + typedef void (Assembler::*InstructionCondSizeRMop)(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondRL)(Condition cond, + Register rt, + Location* location); + typedef void (Assembler::*InstructionCondRRL)(Condition cond, + Register rt, + Register rt2, + Location* location); + typedef void (Assembler::*InstructionCondRRRR)( + Condition cond, Register rd, Register rn, Register rm, Register ra); + typedef void (Assembler::*InstructionCondRSr)(Condition cond, + Register rd, + SpecialRegister spec_reg); + typedef void (Assembler::*InstructionCondMsrOp)( + Condition cond, MaskedSpecialRegister spec_reg, const Operand& operand); + typedef void (Assembler::*InstructionCondSizeRRR)( + Condition 
cond, EncodingSize size, Register rd, Register rn, Register rm); + typedef void (Assembler::*InstructionCondSize)(Condition cond, + EncodingSize size); + typedef void (Assembler::*InstructionCondMop)(Condition cond, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondSizeRl)(Condition cond, + EncodingSize size, + RegisterList registers); + typedef void (Assembler::*InstructionCondSizeOrl)(Condition cond, + EncodingSize size, + Register rt); + typedef void (Assembler::*InstructionCondSizeRR)(Condition cond, + EncodingSize size, + Register rd, + Register rm); + typedef void (Assembler::*InstructionDtQQQ)(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm); + typedef void (Assembler::*InstructionCondRIOp)(Condition cond, + Register rd, + uint32_t imm, + const Operand& operand); + typedef void (Assembler::*InstructionCondRIR)(Condition cond, + Register rd, + uint32_t imm, + Register rn); + typedef void (Assembler::*InstructionCondRRRMop)(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondSizeI)(Condition cond, + EncodingSize size, + uint32_t imm); + typedef void (Assembler::*InstructionCondDtDDD)( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + typedef void (Assembler::*InstructionCondDtQQQ)( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + typedef void (Assembler::*InstructionCondDtQDD)( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + typedef void (Assembler::*InstructionCondDtDD)(Condition cond, + DataType dt, + DRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionCondDtQQ)(Condition cond, + DataType dt, + QRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionCondDtSS)(Condition cond, + DataType dt, + SRegister rd, + SRegister rm); + typedef void (Assembler::*InstructionCondDtSSS)( + Condition cond, DataType dt, SRegister rd, SRegister rn, 
SRegister rm); + typedef void (Assembler::*InstructionCondDtDQQ)( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + typedef void (Assembler::*InstructionCondDtQQD)( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + typedef void (Assembler::*InstructionCondDtDDDop)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtQQQop)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + typedef void (Assembler::*InstructionCondDtSSop)(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand); + typedef void (Assembler::*InstructionCondDtDDop)(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtDtDS)( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + typedef void (Assembler::*InstructionCondDtDtSD)( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + typedef void (Assembler::*InstructionCondDtDtDDSi)(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits); + typedef void (Assembler::*InstructionCondDtDtQQSi)(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits); + typedef void (Assembler::*InstructionCondDtDtSSSi)(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits); + typedef void (Assembler::*InstructionCondDtDtDD)( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm); + typedef void (Assembler::*InstructionCondDtDtQQ)( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm); + typedef void (Assembler::*InstructionCondDtDtDQ)( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm); + typedef void (Assembler::*InstructionCondDtDtQD)( + Condition cond, DataType dt1, DataType dt2, QRegister 
rd, DRegister rm); + typedef void (Assembler::*InstructionCondDtDtSS)( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + typedef void (Assembler::*InstructionDtDtDD)(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionDtDtQQ)(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionDtDtSS)(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm); + typedef void (Assembler::*InstructionDtDtSD)(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionCondDtQR)(Condition cond, + DataType dt, + QRegister rd, + Register rt); + typedef void (Assembler::*InstructionCondDtDR)(Condition cond, + DataType dt, + DRegister rd, + Register rt); + typedef void (Assembler::*InstructionCondDtDDx)(Condition cond, + DataType dt, + DRegister rd, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtQDx)(Condition cond, + DataType dt, + QRegister rd, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtDDDDop)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtQQQQop)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand); + typedef void (Assembler::*InstructionCondDtNrlAmop)( + Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + typedef void (Assembler::*InstructionCondDtNrlMop)( + Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondDtRwbDrl)(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + typedef void (Assembler::*InstructionCondDtRwbSrl)(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + typedef void 
(Assembler::*InstructionCondDtDL)(Condition cond, + DataType dt, + DRegister rd, + Location* location); + typedef void (Assembler::*InstructionCondDtDMop)(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + typedef void (Assembler::*InstructionCondDtSL)(Condition cond, + DataType dt, + SRegister rd, + Location* location); + typedef void (Assembler::*InstructionCondDtSMop)(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + typedef void (Assembler::*InstructionDtDDD)(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm); + typedef void (Assembler::*InstructionDtSSS)(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm); + typedef void (Assembler::*InstructionCondDtDDDx)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtQQDx)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondDtQDDx)(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + typedef void (Assembler::*InstructionCondRS)(Condition cond, + Register rt, + SRegister rn); + typedef void (Assembler::*InstructionCondSR)(Condition cond, + SRegister rn, + Register rt); + typedef void (Assembler::*InstructionCondRRD)(Condition cond, + Register rt, + Register rt2, + DRegister rm); + typedef void (Assembler::*InstructionCondDRR)(Condition cond, + DRegister rm, + Register rt, + Register rt2); + typedef void (Assembler::*InstructionCondRRSS)( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1); + typedef void (Assembler::*InstructionCondSSRR)( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2); + typedef void (Assembler::*InstructionCondDtDxR)(Condition cond, + DataType dt, + DRegisterLane rd, + Register rt); + typedef void (Assembler::*InstructionCondDtQQop)(Condition cond, + DataType dt, + QRegister rd, + const QOperand& 
operand); + typedef void (Assembler::*InstructionCondDtRDx)(Condition cond, + DataType dt, + Register rt, + DRegisterLane rn); + typedef void (Assembler::*InstructionCondDtQD)(Condition cond, + DataType dt, + QRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionCondDtDQ)(Condition cond, + DataType dt, + DRegister rd, + QRegister rm); + typedef void (Assembler::*InstructionCondRoaSfp)(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg); + typedef void (Assembler::*InstructionCondSfpR)(Condition cond, + SpecialFPRegister spec_reg, + Register rt); + typedef void (Assembler::*InstructionCondDtDDIr)(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + typedef void (Assembler::*InstructionCondDtQQIr)(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index); + typedef void (Assembler::*InstructionCondDtQDIr)(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + typedef void (Assembler::*InstructionCondDtDrl)(Condition cond, + DataType dt, + DRegisterList dreglist); + typedef void (Assembler::*InstructionCondDtSrl)(Condition cond, + DataType dt, + SRegisterList sreglist); + typedef void (Assembler::*InstructionCondDtDQQop)(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + typedef void (Assembler::*InstructionDtDD)(DataType dt, + DRegister rd, + DRegister rm); + typedef void (Assembler::*InstructionDtSS)(DataType dt, + SRegister rd, + SRegister rm); + typedef void (Assembler::*InstructionCondDtQDDop)(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand); + typedef void (Assembler::*InstructionCondDtDNrlD)( + Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + virtual void Delegate(InstructionType type, + InstructionCondSizeRROp /*instruction*/, + Condition /*cond*/, + EncodingSize 
/*size*/, + Register /*rd*/, + Register /*rn*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAdc) || (type == kAdcs) || (type == kAdd) || + (type == kAdds) || (type == kAnd) || (type == kAnds) || + (type == kAsr) || (type == kAsrs) || (type == kBic) || + (type == kBics) || (type == kEor) || (type == kEors) || + (type == kLsl) || (type == kLsls) || (type == kLsr) || + (type == kLsrs) || (type == kOrr) || (type == kOrrs) || + (type == kRor) || (type == kRors) || (type == kRsb) || + (type == kRsbs) || (type == kSbc) || (type == kSbcs) || + (type == kSub) || (type == kSubs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondROp /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAdd) || (type == kMovt) || (type == kMovw) || + (type == kSub) || (type == kSxtb16) || (type == kTeq) || + (type == kUxtb16)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionROp /*instruction*/, + Register /*rd*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAdds) || (type == kSubs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRROp /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kAddw) || (type == kOrn) || (type == kOrns) || + (type == kPkhbt) || (type == kPkhtb) || (type == kRsc) || + (type == kRscs) || (type == kSubw) || (type == kSxtab) || + (type == kSxtab16) || (type == kSxtah) || (type == kUxtab) || + (type == kUxtab16) || (type == kUxtah)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRL /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rd*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kAdr) || (type == kLdr)); + 
UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtQQ /*instruction*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVrinta) || (type == kVrintm) || (type == kVrintn) || + (type == kVrintp) || (type == kVrintx) || (type == kVrintz)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeL /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kB)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRII /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + uint32_t /*lsb*/, + uint32_t /*width*/) { + USE(type); + VIXL_ASSERT((type == kBfc)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRII /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + uint32_t /*lsb*/, + uint32_t /*width*/) { + USE(type); + VIXL_ASSERT((type == kBfi) || (type == kSbfx) || (type == kUbfx)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondI /*instruction*/, + Condition /*cond*/, + uint32_t /*imm*/) { + USE(type); + VIXL_ASSERT((type == kBkpt) || (type == kHlt) || (type == kHvc) || + (type == kSvc)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondL /*instruction*/, + Condition /*cond*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kBl) || (type == kBlx) || (type == kPld) || + (type == kPli)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondR /*instruction*/, + Condition /*cond*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kBlx) || (type == kBx) || (type == kBxj)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionRL 
/*instruction*/, + Register /*rn*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kCbnz) || (type == kCbz)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCond /*instruction*/, + Condition /*cond*/) { + USE(type); + VIXL_ASSERT((type == kClrex)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kClz) || (type == kRbit) || (type == kRrx) || + (type == kRrxs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeROp /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rn*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kCmn) || (type == kCmp) || (type == kMov) || + (type == kMovs) || (type == kMvn) || (type == kMvns) || + (type == kSxtb) || (type == kSxth) || (type == kTst) || + (type == kUxtb) || (type == kUxth)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kCrc32b) || (type == kCrc32cb) || (type == kCrc32ch) || + (type == kCrc32cw) || (type == kCrc32h) || (type == kCrc32w) || + (type == kMuls) || (type == kQadd) || (type == kQadd16) || + (type == kQadd8) || (type == kQasx) || (type == kQdadd) || + (type == kQdsub) || (type == kQsax) || (type == kQsub) || + (type == kQsub16) || (type == kQsub8) || (type == kSadd16) || + (type == kSadd8) || (type == kSasx) || (type == kSdiv) || + (type == kSel) || (type == kShadd16) || (type == kShadd8) || + (type == kShasx) || (type == kShsax) || (type == kShsub16) || + (type == kShsub8) || (type == kSmmul) || (type == kSmmulr) || + (type == kSmuad) || (type == kSmuadx) || (type == kSmulbb) || + (type == kSmulbt) || (type 
== kSmultb) || (type == kSmultt) || + (type == kSmulwb) || (type == kSmulwt) || (type == kSmusd) || + (type == kSmusdx) || (type == kSsax) || (type == kSsub16) || + (type == kSsub8) || (type == kUadd16) || (type == kUadd8) || + (type == kUasx) || (type == kUdiv) || (type == kUhadd16) || + (type == kUhadd8) || (type == kUhasx) || (type == kUhsax) || + (type == kUhsub16) || (type == kUhsub8) || (type == kUqadd16) || + (type == kUqadd8) || (type == kUqasx) || (type == kUqsax) || + (type == kUqsub16) || (type == kUqsub8) || (type == kUsad8) || + (type == kUsax) || (type == kUsub16) || (type == kUsub8)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondBa /*instruction*/, + Condition /*cond*/, + MemoryBarrier /*option*/) { + USE(type); + VIXL_ASSERT((type == kDmb) || (type == kDsb) || (type == kIsb)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRwbDrl /*instruction*/, + Condition /*cond*/, + Register /*rn*/, + WriteBack /*write_back*/, + DRegisterList /*dreglist*/) { + USE(type); + VIXL_ASSERT((type == kFldmdbx) || (type == kFldmiax) || + (type == kFstmdbx) || (type == kFstmiax)); + UnimplementedDelegate(type); + } + virtual void DelegateIt(Condition /*cond*/, uint16_t /*mask*/) { + UnimplementedDelegate(kIt); + } + virtual void Delegate(InstructionType type, + InstructionCondRMop /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kLda) || (type == kLdab) || (type == kLdaex) || + (type == kLdaexb) || (type == kLdaexh) || (type == kLdah) || + (type == kLdrex) || (type == kLdrexb) || (type == kLdrexh) || + (type == kStl) || (type == kStlb) || (type == kStlh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRMop /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + const MemOperand& /*operand*/) { + USE(type); + 
VIXL_ASSERT((type == kLdaexd) || (type == kLdrd) || (type == kLdrexd) || + (type == kStlex) || (type == kStlexb) || (type == kStlexh) || + (type == kStrd) || (type == kStrex) || (type == kStrexb) || + (type == kStrexh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRwbRl /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rn*/, + WriteBack /*write_back*/, + RegisterList /*registers*/) { + USE(type); + VIXL_ASSERT((type == kLdm) || (type == kLdmfd) || (type == kStm) || + (type == kStmdb) || (type == kStmea)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRwbRl /*instruction*/, + Condition /*cond*/, + Register /*rn*/, + WriteBack /*write_back*/, + RegisterList /*registers*/) { + USE(type); + VIXL_ASSERT((type == kLdmda) || (type == kLdmdb) || (type == kLdmea) || + (type == kLdmed) || (type == kLdmfa) || (type == kLdmib) || + (type == kStmda) || (type == kStmed) || (type == kStmfa) || + (type == kStmfd) || (type == kStmib)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRMop /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rt*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kLdr) || (type == kLdrb) || (type == kLdrh) || + (type == kLdrsb) || (type == kLdrsh) || (type == kStr) || + (type == kStrb) || (type == kStrh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRL /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kLdrb) || (type == kLdrh) || (type == kLdrsb) || + (type == kLdrsh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRL /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + Location* /*location*/) { + USE(type); + 
VIXL_ASSERT((type == kLdrd)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRRR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rn*/, + Register /*rm*/, + Register /*ra*/) { + USE(type); + VIXL_ASSERT((type == kMla) || (type == kMlas) || (type == kMls) || + (type == kSmlabb) || (type == kSmlabt) || (type == kSmlad) || + (type == kSmladx) || (type == kSmlal) || (type == kSmlalbb) || + (type == kSmlalbt) || (type == kSmlald) || (type == kSmlaldx) || + (type == kSmlals) || (type == kSmlaltb) || (type == kSmlaltt) || + (type == kSmlatb) || (type == kSmlatt) || (type == kSmlawb) || + (type == kSmlawt) || (type == kSmlsd) || (type == kSmlsdx) || + (type == kSmlsld) || (type == kSmlsldx) || (type == kSmmla) || + (type == kSmmlar) || (type == kSmmls) || (type == kSmmlsr) || + (type == kSmull) || (type == kSmulls) || (type == kUmaal) || + (type == kUmlal) || (type == kUmlals) || (type == kUmull) || + (type == kUmulls) || (type == kUsada8)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRSr /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + SpecialRegister /*spec_reg*/) { + USE(type); + VIXL_ASSERT((type == kMrs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondMsrOp /*instruction*/, + Condition /*cond*/, + MaskedSpecialRegister /*spec_reg*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kMsr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRRR /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rd*/, + Register /*rn*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kMul)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSize /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/) { + USE(type); + VIXL_ASSERT((type == 
kNop) || (type == kYield)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondMop /*instruction*/, + Condition /*cond*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kPld) || (type == kPldw) || (type == kPli)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRl /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + RegisterList /*registers*/) { + USE(type); + VIXL_ASSERT((type == kPop) || (type == kPush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeOrl /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kPop) || (type == kPush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeRR /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + Register /*rd*/, + Register /*rm*/) { + USE(type); + VIXL_ASSERT((type == kRev) || (type == kRev16) || (type == kRevsh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtQQQ /*instruction*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmaxnm) || (type == kVminnm)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRIOp /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + uint32_t /*imm*/, + const Operand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kSsat) || (type == kUsat)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRIR /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + uint32_t /*imm*/, + Register /*rn*/) { + USE(type); + VIXL_ASSERT((type == kSsat16) || (type == kUsat16)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + 
InstructionCondRRRMop /*instruction*/, + Condition /*cond*/, + Register /*rd*/, + Register /*rt*/, + Register /*rt2*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kStlexd) || (type == kStrexd)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSizeI /*instruction*/, + Condition /*cond*/, + EncodingSize /*size*/, + uint32_t /*imm*/) { + USE(type); + VIXL_ASSERT((type == kUdf)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaba) || (type == kVabd) || (type == kVacge) || + (type == kVacgt) || (type == kVacle) || (type == kVaclt) || + (type == kVadd) || (type == kVbif) || (type == kVbit) || + (type == kVbsl) || (type == kVceq) || (type == kVcge) || + (type == kVcgt) || (type == kVcle) || (type == kVclt) || + (type == kVdiv) || (type == kVeor) || (type == kVfma) || + (type == kVfms) || (type == kVfnma) || (type == kVfnms) || + (type == kVhadd) || (type == kVhsub) || (type == kVmax) || + (type == kVmin) || (type == kVmla) || (type == kVmls) || + (type == kVmul) || (type == kVnmla) || (type == kVnmls) || + (type == kVnmul) || (type == kVpadd) || (type == kVpmax) || + (type == kVpmin) || (type == kVqadd) || (type == kVqdmulh) || + (type == kVqrdmulh) || (type == kVqrshl) || (type == kVqsub) || + (type == kVrecps) || (type == kVrhadd) || (type == kVrshl) || + (type == kVrsqrts) || (type == kVsub) || (type == kVtst)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaba) || (type == kVabd) || (type == kVacge) || + (type == kVacgt) || (type == kVacle) || (type == kVaclt) || + 
(type == kVadd) || (type == kVbif) || (type == kVbit) || + (type == kVbsl) || (type == kVceq) || (type == kVcge) || + (type == kVcgt) || (type == kVcle) || (type == kVclt) || + (type == kVeor) || (type == kVfma) || (type == kVfms) || + (type == kVhadd) || (type == kVhsub) || (type == kVmax) || + (type == kVmin) || (type == kVmla) || (type == kVmls) || + (type == kVmul) || (type == kVqadd) || (type == kVqdmulh) || + (type == kVqrdmulh) || (type == kVqrshl) || (type == kVqsub) || + (type == kVrecps) || (type == kVrhadd) || (type == kVrshl) || + (type == kVrsqrts) || (type == kVsub) || (type == kVtst)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVabal) || (type == kVabdl) || (type == kVaddl) || + (type == kVmlal) || (type == kVmlsl) || (type == kVmull) || + (type == kVqdmlal) || (type == kVqdmlsl) || + (type == kVqdmull) || (type == kVsubl)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVabs) || (type == kVcls) || (type == kVclz) || + (type == kVcnt) || (type == kVneg) || (type == kVpadal) || + (type == kVpaddl) || (type == kVqabs) || (type == kVqneg) || + (type == kVrecpe) || (type == kVrev16) || (type == kVrev32) || + (type == kVrev64) || (type == kVrintr) || (type == kVrintx) || + (type == kVrintz) || (type == kVrsqrte) || (type == kVsqrt) || + (type == kVswp) || (type == kVtrn) || (type == kVuzp) || + (type == kVzip)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == 
kVabs) || (type == kVcls) || (type == kVclz) || + (type == kVcnt) || (type == kVneg) || (type == kVpadal) || + (type == kVpaddl) || (type == kVqabs) || (type == kVqneg) || + (type == kVrecpe) || (type == kVrev16) || (type == kVrev32) || + (type == kVrev64) || (type == kVrsqrte) || (type == kVswp) || + (type == kVtrn) || (type == kVuzp) || (type == kVzip)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSS /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVabs) || (type == kVneg) || (type == kVrintr) || + (type == kVrintx) || (type == kVrintz) || (type == kVsqrt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSSS /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rn*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVadd) || (type == kVdiv) || (type == kVfma) || + (type == kVfms) || (type == kVfnma) || (type == kVfnms) || + (type == kVmla) || (type == kVmls) || (type == kVmul) || + (type == kVnmla) || (type == kVnmls) || (type == kVnmul) || + (type == kVsub)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaddhn) || (type == kVraddhn) || (type == kVrsubhn) || + (type == kVsubhn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVaddw) || (type == kVsubw)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDDop /*instruction*/, + Condition /*cond*/, + 
DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVand) || (type == kVbic) || (type == kVceq) || + (type == kVcge) || (type == kVcgt) || (type == kVcle) || + (type == kVclt) || (type == kVorn) || (type == kVorr) || + (type == kVqshl) || (type == kVqshlu) || (type == kVrshr) || + (type == kVrsra) || (type == kVshl) || (type == kVshr) || + (type == kVsli) || (type == kVsra) || (type == kVsri)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVand) || (type == kVbic) || (type == kVceq) || + (type == kVcge) || (type == kVcgt) || (type == kVcle) || + (type == kVclt) || (type == kVorn) || (type == kVorr) || + (type == kVqshl) || (type == kVqshlu) || (type == kVrshr) || + (type == kVrsra) || (type == kVshl) || (type == kVshr) || + (type == kVsli) || (type == kVsra) || (type == kVsri)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSSop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + const SOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVcmp) || (type == kVcmpe) || (type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVcmp) || (type == kVcmpe) || (type == kVmov) || + (type == kVmvn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDS /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt) || (type == 
kVcvtb) || (type == kVcvtt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtSD /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt) || (type == kVcvtb) || (type == kVcvtr) || + (type == kVcvtt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDDSi /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + DRegister /*rm*/, + int32_t /*fbits*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtQQSi /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + QRegister /*rm*/, + int32_t /*fbits*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtSSSi /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + SRegister /*rm*/, + int32_t /*fbits*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDD /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtQQ /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtDQ /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + 
QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtQD /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDtSS /*instruction*/, + Condition /*cond*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvt) || (type == kVcvtb) || (type == kVcvtr) || + (type == kVcvtt)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtDD /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtQQ /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + QRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtSS /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDtSD /*instruction*/, + DataType /*dt1*/, + DataType /*dt2*/, + SRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVcvta) || (type == kVcvtm) || (type == kVcvtn) || + (type == kVcvtp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + 
InstructionCondDtQR /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDR /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVdup)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDDDop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVext)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + QRegister /*rm*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVext)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtNrlAmop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + const NeonRegisterList& /*nreglist*/, + const AlignedMemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVld1) || (type == kVld2) || (type == kVld3) || + (type == kVld4) || (type == kVst1) || (type == kVst2) || + (type == kVst3) || (type == kVst4)); + UnimplementedDelegate(type); + } + virtual 
void Delegate(InstructionType type, + InstructionCondDtNrlMop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + const NeonRegisterList& /*nreglist*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVld3) || (type == kVst3)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtRwbDrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + Register /*rn*/, + WriteBack /*write_back*/, + DRegisterList /*dreglist*/) { + USE(type); + VIXL_ASSERT((type == kVldm) || (type == kVldmdb) || (type == kVldmia) || + (type == kVstm) || (type == kVstmdb) || (type == kVstmia)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtRwbSrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + Register /*rn*/, + WriteBack /*write_back*/, + SRegisterList /*sreglist*/) { + USE(type); + VIXL_ASSERT((type == kVldm) || (type == kVldmdb) || (type == kVldmia) || + (type == kVstm) || (type == kVstmdb) || (type == kVstmia)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDL /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kVldr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDMop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVldr) || (type == kVstr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSL /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegister /*rd*/, + Location* /*location*/) { + USE(type); + VIXL_ASSERT((type == kVldr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSMop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, 
+ SRegister /*rd*/, + const MemOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVldr) || (type == kVstr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDDD /*instruction*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmaxnm) || (type == kVminnm) || (type == kVseleq) || + (type == kVselge) || (type == kVselgt) || (type == kVselvs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtSSS /*instruction*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rn*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmaxnm) || (type == kVminnm) || (type == kVseleq) || + (type == kVselge) || (type == kVselgt) || (type == kVselvs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmla) || (type == kVmls) || (type == kVqdmulh) || + (type == kVqrdmulh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmla) || (type == kVmls) || (type == kVqdmulh) || + (type == kVqrdmulh)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rn*/, + DRegisterLane /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmlal) || (type == kVmlsl) || (type == kVqdmull)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRS /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + SRegister /*rn*/) 
{ + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSR /*instruction*/, + Condition /*cond*/, + SRegister /*rn*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRD /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDRR /*instruction*/, + Condition /*cond*/, + DRegister /*rm*/, + Register /*rt*/, + Register /*rt2*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRRSS /*instruction*/, + Condition /*cond*/, + Register /*rt*/, + Register /*rt2*/, + SRegister /*rm*/, + SRegister /*rm1*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSSRR /*instruction*/, + Condition /*cond*/, + SRegister /*rm*/, + SRegister /*rm1*/, + Register /*rt*/, + Register /*rt2*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDxR /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegisterLane /*rd*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVmov) || (type == kVmvn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtRDx /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, 
+ Register /*rt*/, + DRegisterLane /*rn*/) { + USE(type); + VIXL_ASSERT((type == kVmov)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmovl)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDQ /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + QRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVmovn) || (type == kVqmovn) || (type == kVqmovun)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondRoaSfp /*instruction*/, + Condition /*cond*/, + RegisterOrAPSR_nzcv /*rt*/, + SpecialFPRegister /*spec_reg*/) { + USE(type); + VIXL_ASSERT((type == kVmrs)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondSfpR /*instruction*/, + Condition /*cond*/, + SpecialFPRegister /*spec_reg*/, + Register /*rt*/) { + USE(type); + VIXL_ASSERT((type == kVmsr)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDDIr /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rn*/, + DRegister /*dm*/, + unsigned /*index*/) { + USE(type); + VIXL_ASSERT((type == kVmul)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQQIr /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + QRegister /*rn*/, + DRegister /*dm*/, + unsigned /*index*/) { + USE(type); + VIXL_ASSERT((type == kVmul)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDIr /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rn*/, + DRegister /*dm*/, + unsigned /*index*/) { + USE(type); + VIXL_ASSERT((type == 
kVmull) || (type == kVqdmlal) || (type == kVqdmlsl)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegisterList /*dreglist*/) { + USE(type); + VIXL_ASSERT((type == kVpop) || (type == kVpush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtSrl /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + SRegisterList /*sreglist*/) { + USE(type); + VIXL_ASSERT((type == kVpop) || (type == kVpush)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDQQop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + DRegister /*rd*/, + QRegister /*rm*/, + const QOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVqrshrn) || (type == kVqrshrun) || + (type == kVqshrn) || (type == kVqshrun) || (type == kVrshrn) || + (type == kVshrn)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtDD /*instruction*/, + DataType /*dt*/, + DRegister /*rd*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVrinta) || (type == kVrintm) || (type == kVrintn) || + (type == kVrintp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionDtSS /*instruction*/, + DataType /*dt*/, + SRegister /*rd*/, + SRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVrinta) || (type == kVrintm) || (type == kVrintn) || + (type == kVrintp)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtQDDop /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + QRegister /*rd*/, + DRegister /*rm*/, + const DOperand& /*operand*/) { + USE(type); + VIXL_ASSERT((type == kVshll)); + UnimplementedDelegate(type); + } + virtual void Delegate(InstructionType type, + InstructionCondDtDNrlD /*instruction*/, + Condition /*cond*/, + DataType /*dt*/, + 
DRegister /*rd*/, + const NeonRegisterList& /*nreglist*/, + DRegister /*rm*/) { + USE(type); + VIXL_ASSERT((type == kVtbl) || (type == kVtbx)); + UnimplementedDelegate(type); + } + + void adc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void adc(Register rd, Register rn, const Operand& operand) { + adc(al, Best, rd, rn, operand); + } + void adc(Condition cond, Register rd, Register rn, const Operand& operand) { + adc(cond, Best, rd, rn, operand); + } + void adc(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + adc(al, size, rd, rn, operand); + } + + void adcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void adcs(Register rd, Register rn, const Operand& operand) { + adcs(al, Best, rd, rn, operand); + } + void adcs(Condition cond, Register rd, Register rn, const Operand& operand) { + adcs(cond, Best, rd, rn, operand); + } + void adcs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + adcs(al, size, rd, rn, operand); + } + + void add(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void add(Register rd, Register rn, const Operand& operand) { + add(al, Best, rd, rn, operand); + } + void add(Condition cond, Register rd, Register rn, const Operand& operand) { + add(cond, Best, rd, rn, operand); + } + void add(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + add(al, size, rd, rn, operand); + } + + void add(Condition cond, Register rd, const Operand& operand); + void add(Register rd, const Operand& operand) { add(al, rd, operand); } + + void adds(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void adds(Register rd, Register rn, const Operand& operand) { + adds(al, Best, rd, rn, operand); + } + void adds(Condition cond, Register rd, Register rn, const Operand& operand) { + adds(cond, Best, rd, 
rn, operand); + } + void adds(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + adds(al, size, rd, rn, operand); + } + + void adds(Register rd, const Operand& operand); + + void addw(Condition cond, Register rd, Register rn, const Operand& operand); + void addw(Register rd, Register rn, const Operand& operand) { + addw(al, rd, rn, operand); + } + + void adr(Condition cond, EncodingSize size, Register rd, Location* location); + bool adr_info(Condition cond, + EncodingSize size, + Register rd, + Location* location, + const struct ReferenceInfo** info); + void adr(Register rd, Location* location) { adr(al, Best, rd, location); } + void adr(Condition cond, Register rd, Location* location) { + adr(cond, Best, rd, location); + } + void adr(EncodingSize size, Register rd, Location* location) { + adr(al, size, rd, location); + } + + void and_(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void and_(Register rd, Register rn, const Operand& operand) { + and_(al, Best, rd, rn, operand); + } + void and_(Condition cond, Register rd, Register rn, const Operand& operand) { + and_(cond, Best, rd, rn, operand); + } + void and_(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + and_(al, size, rd, rn, operand); + } + + void ands(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void ands(Register rd, Register rn, const Operand& operand) { + ands(al, Best, rd, rn, operand); + } + void ands(Condition cond, Register rd, Register rn, const Operand& operand) { + ands(cond, Best, rd, rn, operand); + } + void ands(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + ands(al, size, rd, rn, operand); + } + + void asr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void asr(Register rd, Register rm, const Operand& operand) { + asr(al, Best, rd, rm, operand); + } + void 
asr(Condition cond, Register rd, Register rm, const Operand& operand) { + asr(cond, Best, rd, rm, operand); + } + void asr(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + asr(al, size, rd, rm, operand); + } + + void asrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void asrs(Register rd, Register rm, const Operand& operand) { + asrs(al, Best, rd, rm, operand); + } + void asrs(Condition cond, Register rd, Register rm, const Operand& operand) { + asrs(cond, Best, rd, rm, operand); + } + void asrs(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + asrs(al, size, rd, rm, operand); + } + + void b(Condition cond, EncodingSize size, Location* location); + bool b_info(Condition cond, + EncodingSize size, + Location* location, + const struct ReferenceInfo** info); + void b(Location* location) { b(al, Best, location); } + void b(Condition cond, Location* location) { b(cond, Best, location); } + void b(EncodingSize size, Location* location) { b(al, size, location); } + + void bfc(Condition cond, Register rd, uint32_t lsb, uint32_t width); + void bfc(Register rd, uint32_t lsb, uint32_t width) { + bfc(al, rd, lsb, width); + } + + void bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + void bfi(Register rd, Register rn, uint32_t lsb, uint32_t width) { + bfi(al, rd, rn, lsb, width); + } + + void bic(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void bic(Register rd, Register rn, const Operand& operand) { + bic(al, Best, rd, rn, operand); + } + void bic(Condition cond, Register rd, Register rn, const Operand& operand) { + bic(cond, Best, rd, rn, operand); + } + void bic(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + bic(al, size, rd, rn, operand); + } + + void bics(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + 
void bics(Register rd, Register rn, const Operand& operand) { + bics(al, Best, rd, rn, operand); + } + void bics(Condition cond, Register rd, Register rn, const Operand& operand) { + bics(cond, Best, rd, rn, operand); + } + void bics(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + bics(al, size, rd, rn, operand); + } + + void bkpt(Condition cond, uint32_t imm); + void bkpt(uint32_t imm) { bkpt(al, imm); } + + void bl(Condition cond, Location* location); + bool bl_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void bl(Location* location) { bl(al, location); } + + void blx(Condition cond, Location* location); + bool blx_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void blx(Location* location) { blx(al, location); } + + void blx(Condition cond, Register rm); + void blx(Register rm) { blx(al, rm); } + + void bx(Condition cond, Register rm); + void bx(Register rm) { bx(al, rm); } + + void bxj(Condition cond, Register rm); + void bxj(Register rm) { bxj(al, rm); } + + void cbnz(Register rn, Location* location); + bool cbnz_info(Register rn, + Location* location, + const struct ReferenceInfo** info); + + void cbz(Register rn, Location* location); + bool cbz_info(Register rn, + Location* location, + const struct ReferenceInfo** info); + + void clrex(Condition cond); + void clrex() { clrex(al); } + + void clz(Condition cond, Register rd, Register rm); + void clz(Register rd, Register rm) { clz(al, rd, rm); } + + void cmn(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + void cmn(Register rn, const Operand& operand) { cmn(al, Best, rn, operand); } + void cmn(Condition cond, Register rn, const Operand& operand) { + cmn(cond, Best, rn, operand); + } + void cmn(EncodingSize size, Register rn, const Operand& operand) { + cmn(al, size, rn, operand); + } + + void cmp(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + 
void cmp(Register rn, const Operand& operand) { cmp(al, Best, rn, operand); } + void cmp(Condition cond, Register rn, const Operand& operand) { + cmp(cond, Best, rn, operand); + } + void cmp(EncodingSize size, Register rn, const Operand& operand) { + cmp(al, size, rn, operand); + } + + void crc32b(Condition cond, Register rd, Register rn, Register rm); + void crc32b(Register rd, Register rn, Register rm) { crc32b(al, rd, rn, rm); } + + void crc32cb(Condition cond, Register rd, Register rn, Register rm); + void crc32cb(Register rd, Register rn, Register rm) { + crc32cb(al, rd, rn, rm); + } + + void crc32ch(Condition cond, Register rd, Register rn, Register rm); + void crc32ch(Register rd, Register rn, Register rm) { + crc32ch(al, rd, rn, rm); + } + + void crc32cw(Condition cond, Register rd, Register rn, Register rm); + void crc32cw(Register rd, Register rn, Register rm) { + crc32cw(al, rd, rn, rm); + } + + void crc32h(Condition cond, Register rd, Register rn, Register rm); + void crc32h(Register rd, Register rn, Register rm) { crc32h(al, rd, rn, rm); } + + void crc32w(Condition cond, Register rd, Register rn, Register rm); + void crc32w(Register rd, Register rn, Register rm) { crc32w(al, rd, rn, rm); } + + void dmb(Condition cond, MemoryBarrier option); + void dmb(MemoryBarrier option) { dmb(al, option); } + + void dsb(Condition cond, MemoryBarrier option); + void dsb(MemoryBarrier option) { dsb(al, option); } + + void eor(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void eor(Register rd, Register rn, const Operand& operand) { + eor(al, Best, rd, rn, operand); + } + void eor(Condition cond, Register rd, Register rn, const Operand& operand) { + eor(cond, Best, rd, rn, operand); + } + void eor(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + eor(al, size, rd, rn, operand); + } + + void eors(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + 
void eors(Register rd, Register rn, const Operand& operand) { + eors(al, Best, rd, rn, operand); + } + void eors(Condition cond, Register rd, Register rn, const Operand& operand) { + eors(cond, Best, rd, rn, operand); + } + void eors(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + eors(al, size, rd, rn, operand); + } + + void fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fldmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + fldmdbx(al, rn, write_back, dreglist); + } + + void fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fldmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + fldmiax(al, rn, write_back, dreglist); + } + + void fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fstmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + fstmdbx(al, rn, write_back, dreglist); + } + + void fstmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void fstmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + fstmiax(al, rn, write_back, dreglist); + } + + void hlt(Condition cond, uint32_t imm); + void hlt(uint32_t imm) { hlt(al, imm); } + + void hvc(Condition cond, uint32_t imm); + void hvc(uint32_t imm) { hvc(al, imm); } + + void isb(Condition cond, MemoryBarrier option); + void isb(MemoryBarrier option) { isb(al, option); } + + void it(Condition cond, uint16_t mask); + + void lda(Condition cond, Register rt, const MemOperand& operand); + void lda(Register rt, const MemOperand& operand) { lda(al, rt, operand); } + + void ldab(Condition cond, Register rt, const MemOperand& operand); + void ldab(Register rt, const MemOperand& operand) { ldab(al, rt, operand); } + + void ldaex(Condition cond, Register rt, const MemOperand& operand); + void ldaex(Register rt, const MemOperand& operand) { ldaex(al, rt, 
operand); } + + void ldaexb(Condition cond, Register rt, const MemOperand& operand); + void ldaexb(Register rt, const MemOperand& operand) { + ldaexb(al, rt, operand); + } + + void ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void ldaexd(Register rt, Register rt2, const MemOperand& operand) { + ldaexd(al, rt, rt2, operand); + } + + void ldaexh(Condition cond, Register rt, const MemOperand& operand); + void ldaexh(Register rt, const MemOperand& operand) { + ldaexh(al, rt, operand); + } + + void ldah(Condition cond, Register rt, const MemOperand& operand); + void ldah(Register rt, const MemOperand& operand) { ldah(al, rt, operand); } + + void ldm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldm(Register rn, WriteBack write_back, RegisterList registers) { + ldm(al, Best, rn, write_back, registers); + } + void ldm(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldm(cond, Best, rn, write_back, registers); + } + void ldm(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldm(al, size, rn, write_back, registers); + } + + void ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmda(Register rn, WriteBack write_back, RegisterList registers) { + ldmda(al, rn, write_back, registers); + } + + void ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmdb(Register rn, WriteBack write_back, RegisterList registers) { + ldmdb(al, rn, write_back, registers); + } + + void ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmea(Register rn, WriteBack write_back, RegisterList registers) { + ldmea(al, rn, write_back, registers); + } + + void ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmed(Register rn, WriteBack write_back, 
RegisterList registers) { + ldmed(al, rn, write_back, registers); + } + + void ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmfa(Register rn, WriteBack write_back, RegisterList registers) { + ldmfa(al, rn, write_back, registers); + } + + void ldmfd(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmfd(Register rn, WriteBack write_back, RegisterList registers) { + ldmfd(al, Best, rn, write_back, registers); + } + void ldmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldmfd(cond, Best, rn, write_back, registers); + } + void ldmfd(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + ldmfd(al, size, rn, write_back, registers); + } + + void ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void ldmib(Register rn, WriteBack write_back, RegisterList registers) { + ldmib(al, rn, write_back, registers); + } + + void ldr(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldr(Register rt, const MemOperand& operand) { + ldr(al, Best, rt, operand); + } + void ldr(Condition cond, Register rt, const MemOperand& operand) { + ldr(cond, Best, rt, operand); + } + void ldr(EncodingSize size, Register rt, const MemOperand& operand) { + ldr(al, size, rt, operand); + } + + void ldr(Condition cond, EncodingSize size, Register rt, Location* location); + bool ldr_info(Condition cond, + EncodingSize size, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldr(Register rt, Location* location) { ldr(al, Best, rt, location); } + void ldr(Condition cond, Register rt, Location* location) { + ldr(cond, Best, rt, location); + } + void ldr(EncodingSize size, Register rt, Location* location) { + ldr(al, size, rt, location); + } + + void ldrb(Condition cond, + EncodingSize size, + Register rt, + const 
MemOperand& operand); + void ldrb(Register rt, const MemOperand& operand) { + ldrb(al, Best, rt, operand); + } + void ldrb(Condition cond, Register rt, const MemOperand& operand) { + ldrb(cond, Best, rt, operand); + } + void ldrb(EncodingSize size, Register rt, const MemOperand& operand) { + ldrb(al, size, rt, operand); + } + + void ldrb(Condition cond, Register rt, Location* location); + bool ldrb_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrb(Register rt, Location* location) { ldrb(al, rt, location); } + + void ldrd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void ldrd(Register rt, Register rt2, const MemOperand& operand) { + ldrd(al, rt, rt2, operand); + } + + void ldrd(Condition cond, Register rt, Register rt2, Location* location); + bool ldrd_info(Condition cond, + Register rt, + Register rt2, + Location* location, + const struct ReferenceInfo** info); + void ldrd(Register rt, Register rt2, Location* location) { + ldrd(al, rt, rt2, location); + } + + void ldrex(Condition cond, Register rt, const MemOperand& operand); + void ldrex(Register rt, const MemOperand& operand) { ldrex(al, rt, operand); } + + void ldrexb(Condition cond, Register rt, const MemOperand& operand); + void ldrexb(Register rt, const MemOperand& operand) { + ldrexb(al, rt, operand); + } + + void ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void ldrexd(Register rt, Register rt2, const MemOperand& operand) { + ldrexd(al, rt, rt2, operand); + } + + void ldrexh(Condition cond, Register rt, const MemOperand& operand); + void ldrexh(Register rt, const MemOperand& operand) { + ldrexh(al, rt, operand); + } + + void ldrh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldrh(Register rt, const MemOperand& operand) { + ldrh(al, Best, rt, operand); + } + void ldrh(Condition cond, Register rt, const MemOperand& operand) { + 
ldrh(cond, Best, rt, operand); + } + void ldrh(EncodingSize size, Register rt, const MemOperand& operand) { + ldrh(al, size, rt, operand); + } + + void ldrh(Condition cond, Register rt, Location* location); + bool ldrh_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrh(Register rt, Location* location) { ldrh(al, rt, location); } + + void ldrsb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldrsb(Register rt, const MemOperand& operand) { + ldrsb(al, Best, rt, operand); + } + void ldrsb(Condition cond, Register rt, const MemOperand& operand) { + ldrsb(cond, Best, rt, operand); + } + void ldrsb(EncodingSize size, Register rt, const MemOperand& operand) { + ldrsb(al, size, rt, operand); + } + + void ldrsb(Condition cond, Register rt, Location* location); + bool ldrsb_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrsb(Register rt, Location* location) { ldrsb(al, rt, location); } + + void ldrsh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void ldrsh(Register rt, const MemOperand& operand) { + ldrsh(al, Best, rt, operand); + } + void ldrsh(Condition cond, Register rt, const MemOperand& operand) { + ldrsh(cond, Best, rt, operand); + } + void ldrsh(EncodingSize size, Register rt, const MemOperand& operand) { + ldrsh(al, size, rt, operand); + } + + void ldrsh(Condition cond, Register rt, Location* location); + bool ldrsh_info(Condition cond, + Register rt, + Location* location, + const struct ReferenceInfo** info); + void ldrsh(Register rt, Location* location) { ldrsh(al, rt, location); } + + void lsl(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsl(Register rd, Register rm, const Operand& operand) { + lsl(al, Best, rd, rm, operand); + } + void lsl(Condition cond, Register rd, Register rm, const Operand& operand) { + lsl(cond, 
Best, rd, rm, operand); + } + void lsl(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsl(al, size, rd, rm, operand); + } + + void lsls(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsls(Register rd, Register rm, const Operand& operand) { + lsls(al, Best, rd, rm, operand); + } + void lsls(Condition cond, Register rd, Register rm, const Operand& operand) { + lsls(cond, Best, rd, rm, operand); + } + void lsls(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsls(al, size, rd, rm, operand); + } + + void lsr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsr(Register rd, Register rm, const Operand& operand) { + lsr(al, Best, rd, rm, operand); + } + void lsr(Condition cond, Register rd, Register rm, const Operand& operand) { + lsr(cond, Best, rd, rm, operand); + } + void lsr(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsr(al, size, rd, rm, operand); + } + + void lsrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void lsrs(Register rd, Register rm, const Operand& operand) { + lsrs(al, Best, rd, rm, operand); + } + void lsrs(Condition cond, Register rd, Register rm, const Operand& operand) { + lsrs(cond, Best, rd, rm, operand); + } + void lsrs(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + lsrs(al, size, rd, rm, operand); + } + + void mla(Condition cond, Register rd, Register rn, Register rm, Register ra); + void mla(Register rd, Register rn, Register rm, Register ra) { + mla(al, rd, rn, rm, ra); + } + + void mlas(Condition cond, Register rd, Register rn, Register rm, Register ra); + void mlas(Register rd, Register rn, Register rm, Register ra) { + mlas(al, rd, rn, rm, ra); + } + + void mls(Condition cond, Register rd, Register rn, Register rm, Register ra); + void mls(Register rd, 
Register rn, Register rm, Register ra) { + mls(al, rd, rn, rm, ra); + } + + void mov(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void mov(Register rd, const Operand& operand) { mov(al, Best, rd, operand); } + void mov(Condition cond, Register rd, const Operand& operand) { + mov(cond, Best, rd, operand); + } + void mov(EncodingSize size, Register rd, const Operand& operand) { + mov(al, size, rd, operand); + } + + void movs(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void movs(Register rd, const Operand& operand) { + movs(al, Best, rd, operand); + } + void movs(Condition cond, Register rd, const Operand& operand) { + movs(cond, Best, rd, operand); + } + void movs(EncodingSize size, Register rd, const Operand& operand) { + movs(al, size, rd, operand); + } + + void movt(Condition cond, Register rd, const Operand& operand); + void movt(Register rd, const Operand& operand) { movt(al, rd, operand); } + + void movw(Condition cond, Register rd, const Operand& operand); + void movw(Register rd, const Operand& operand) { movw(al, rd, operand); } + + void mrs(Condition cond, Register rd, SpecialRegister spec_reg); + void mrs(Register rd, SpecialRegister spec_reg) { mrs(al, rd, spec_reg); } + + void msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand); + void msr(MaskedSpecialRegister spec_reg, const Operand& operand) { + msr(al, spec_reg, operand); + } + + void mul( + Condition cond, EncodingSize size, Register rd, Register rn, Register rm); + void mul(Register rd, Register rn, Register rm) { mul(al, Best, rd, rn, rm); } + void mul(Condition cond, Register rd, Register rn, Register rm) { + mul(cond, Best, rd, rn, rm); + } + void mul(EncodingSize size, Register rd, Register rn, Register rm) { + mul(al, size, rd, rn, rm); + } + + void muls(Condition cond, Register rd, Register rn, Register rm); + void muls(Register rd, Register rn, Register rm) { muls(al, rd, rn, rm); } + + void 
mvn(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void mvn(Register rd, const Operand& operand) { mvn(al, Best, rd, operand); } + void mvn(Condition cond, Register rd, const Operand& operand) { + mvn(cond, Best, rd, operand); + } + void mvn(EncodingSize size, Register rd, const Operand& operand) { + mvn(al, size, rd, operand); + } + + void mvns(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void mvns(Register rd, const Operand& operand) { + mvns(al, Best, rd, operand); + } + void mvns(Condition cond, Register rd, const Operand& operand) { + mvns(cond, Best, rd, operand); + } + void mvns(EncodingSize size, Register rd, const Operand& operand) { + mvns(al, size, rd, operand); + } + + void nop(Condition cond, EncodingSize size); + void nop() { nop(al, Best); } + void nop(Condition cond) { nop(cond, Best); } + void nop(EncodingSize size) { nop(al, size); } + + void orn(Condition cond, Register rd, Register rn, const Operand& operand); + void orn(Register rd, Register rn, const Operand& operand) { + orn(al, rd, rn, operand); + } + + void orns(Condition cond, Register rd, Register rn, const Operand& operand); + void orns(Register rd, Register rn, const Operand& operand) { + orns(al, rd, rn, operand); + } + + void orr(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void orr(Register rd, Register rn, const Operand& operand) { + orr(al, Best, rd, rn, operand); + } + void orr(Condition cond, Register rd, Register rn, const Operand& operand) { + orr(cond, Best, rd, rn, operand); + } + void orr(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + orr(al, size, rd, rn, operand); + } + + void orrs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void orrs(Register rd, Register rn, const Operand& operand) { + orrs(al, Best, rd, rn, operand); + } + void orrs(Condition cond, Register rd, 
Register rn, const Operand& operand) { + orrs(cond, Best, rd, rn, operand); + } + void orrs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + orrs(al, size, rd, rn, operand); + } + + void pkhbt(Condition cond, Register rd, Register rn, const Operand& operand); + void pkhbt(Register rd, Register rn, const Operand& operand) { + pkhbt(al, rd, rn, operand); + } + + void pkhtb(Condition cond, Register rd, Register rn, const Operand& operand); + void pkhtb(Register rd, Register rn, const Operand& operand) { + pkhtb(al, rd, rn, operand); + } + + void pld(Condition cond, Location* location); + bool pld_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void pld(Location* location) { pld(al, location); } + + void pld(Condition cond, const MemOperand& operand); + void pld(const MemOperand& operand) { pld(al, operand); } + + void pldw(Condition cond, const MemOperand& operand); + void pldw(const MemOperand& operand) { pldw(al, operand); } + + void pli(Condition cond, const MemOperand& operand); + void pli(const MemOperand& operand) { pli(al, operand); } + + void pli(Condition cond, Location* location); + bool pli_info(Condition cond, + Location* location, + const struct ReferenceInfo** info); + void pli(Location* location) { pli(al, location); } + + void pop(Condition cond, EncodingSize size, RegisterList registers); + void pop(RegisterList registers) { pop(al, Best, registers); } + void pop(Condition cond, RegisterList registers) { + pop(cond, Best, registers); + } + void pop(EncodingSize size, RegisterList registers) { + pop(al, size, registers); + } + + void pop(Condition cond, EncodingSize size, Register rt); + void pop(Register rt) { pop(al, Best, rt); } + void pop(Condition cond, Register rt) { pop(cond, Best, rt); } + void pop(EncodingSize size, Register rt) { pop(al, size, rt); } + + void push(Condition cond, EncodingSize size, RegisterList registers); + void push(RegisterList registers) { push(al, Best, 
registers); } + void push(Condition cond, RegisterList registers) { + push(cond, Best, registers); + } + void push(EncodingSize size, RegisterList registers) { + push(al, size, registers); + } + + void push(Condition cond, EncodingSize size, Register rt); + void push(Register rt) { push(al, Best, rt); } + void push(Condition cond, Register rt) { push(cond, Best, rt); } + void push(EncodingSize size, Register rt) { push(al, size, rt); } + + void qadd(Condition cond, Register rd, Register rm, Register rn); + void qadd(Register rd, Register rm, Register rn) { qadd(al, rd, rm, rn); } + + void qadd16(Condition cond, Register rd, Register rn, Register rm); + void qadd16(Register rd, Register rn, Register rm) { qadd16(al, rd, rn, rm); } + + void qadd8(Condition cond, Register rd, Register rn, Register rm); + void qadd8(Register rd, Register rn, Register rm) { qadd8(al, rd, rn, rm); } + + void qasx(Condition cond, Register rd, Register rn, Register rm); + void qasx(Register rd, Register rn, Register rm) { qasx(al, rd, rn, rm); } + + void qdadd(Condition cond, Register rd, Register rm, Register rn); + void qdadd(Register rd, Register rm, Register rn) { qdadd(al, rd, rm, rn); } + + void qdsub(Condition cond, Register rd, Register rm, Register rn); + void qdsub(Register rd, Register rm, Register rn) { qdsub(al, rd, rm, rn); } + + void qsax(Condition cond, Register rd, Register rn, Register rm); + void qsax(Register rd, Register rn, Register rm) { qsax(al, rd, rn, rm); } + + void qsub(Condition cond, Register rd, Register rm, Register rn); + void qsub(Register rd, Register rm, Register rn) { qsub(al, rd, rm, rn); } + + void qsub16(Condition cond, Register rd, Register rn, Register rm); + void qsub16(Register rd, Register rn, Register rm) { qsub16(al, rd, rn, rm); } + + void qsub8(Condition cond, Register rd, Register rn, Register rm); + void qsub8(Register rd, Register rn, Register rm) { qsub8(al, rd, rn, rm); } + + void rbit(Condition cond, Register rd, Register rm); + void 
rbit(Register rd, Register rm) { rbit(al, rd, rm); } + + void rev(Condition cond, EncodingSize size, Register rd, Register rm); + void rev(Register rd, Register rm) { rev(al, Best, rd, rm); } + void rev(Condition cond, Register rd, Register rm) { + rev(cond, Best, rd, rm); + } + void rev(EncodingSize size, Register rd, Register rm) { + rev(al, size, rd, rm); + } + + void rev16(Condition cond, EncodingSize size, Register rd, Register rm); + void rev16(Register rd, Register rm) { rev16(al, Best, rd, rm); } + void rev16(Condition cond, Register rd, Register rm) { + rev16(cond, Best, rd, rm); + } + void rev16(EncodingSize size, Register rd, Register rm) { + rev16(al, size, rd, rm); + } + + void revsh(Condition cond, EncodingSize size, Register rd, Register rm); + void revsh(Register rd, Register rm) { revsh(al, Best, rd, rm); } + void revsh(Condition cond, Register rd, Register rm) { + revsh(cond, Best, rd, rm); + } + void revsh(EncodingSize size, Register rd, Register rm) { + revsh(al, size, rd, rm); + } + + void ror(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void ror(Register rd, Register rm, const Operand& operand) { + ror(al, Best, rd, rm, operand); + } + void ror(Condition cond, Register rd, Register rm, const Operand& operand) { + ror(cond, Best, rd, rm, operand); + } + void ror(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + ror(al, size, rd, rm, operand); + } + + void rors(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + void rors(Register rd, Register rm, const Operand& operand) { + rors(al, Best, rd, rm, operand); + } + void rors(Condition cond, Register rd, Register rm, const Operand& operand) { + rors(cond, Best, rd, rm, operand); + } + void rors(EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + rors(al, size, rd, rm, operand); + } + + void rrx(Condition cond, Register rd, Register rm); + void 
rrx(Register rd, Register rm) { rrx(al, rd, rm); } + + void rrxs(Condition cond, Register rd, Register rm); + void rrxs(Register rd, Register rm) { rrxs(al, rd, rm); } + + void rsb(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void rsb(Register rd, Register rn, const Operand& operand) { + rsb(al, Best, rd, rn, operand); + } + void rsb(Condition cond, Register rd, Register rn, const Operand& operand) { + rsb(cond, Best, rd, rn, operand); + } + void rsb(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + rsb(al, size, rd, rn, operand); + } + + void rsbs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void rsbs(Register rd, Register rn, const Operand& operand) { + rsbs(al, Best, rd, rn, operand); + } + void rsbs(Condition cond, Register rd, Register rn, const Operand& operand) { + rsbs(cond, Best, rd, rn, operand); + } + void rsbs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + rsbs(al, size, rd, rn, operand); + } + + void rsc(Condition cond, Register rd, Register rn, const Operand& operand); + void rsc(Register rd, Register rn, const Operand& operand) { + rsc(al, rd, rn, operand); + } + + void rscs(Condition cond, Register rd, Register rn, const Operand& operand); + void rscs(Register rd, Register rn, const Operand& operand) { + rscs(al, rd, rn, operand); + } + + void sadd16(Condition cond, Register rd, Register rn, Register rm); + void sadd16(Register rd, Register rn, Register rm) { sadd16(al, rd, rn, rm); } + + void sadd8(Condition cond, Register rd, Register rn, Register rm); + void sadd8(Register rd, Register rn, Register rm) { sadd8(al, rd, rn, rm); } + + void sasx(Condition cond, Register rd, Register rn, Register rm); + void sasx(Register rd, Register rn, Register rm) { sasx(al, rd, rn, rm); } + + void sbc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void 
sbc(Register rd, Register rn, const Operand& operand) { + sbc(al, Best, rd, rn, operand); + } + void sbc(Condition cond, Register rd, Register rn, const Operand& operand) { + sbc(cond, Best, rd, rn, operand); + } + void sbc(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + sbc(al, size, rd, rn, operand); + } + + void sbcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void sbcs(Register rd, Register rn, const Operand& operand) { + sbcs(al, Best, rd, rn, operand); + } + void sbcs(Condition cond, Register rd, Register rn, const Operand& operand) { + sbcs(cond, Best, rd, rn, operand); + } + void sbcs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + sbcs(al, size, rd, rn, operand); + } + + void sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width) { + sbfx(al, rd, rn, lsb, width); + } + + void sdiv(Condition cond, Register rd, Register rn, Register rm); + void sdiv(Register rd, Register rn, Register rm) { sdiv(al, rd, rn, rm); } + + void sel(Condition cond, Register rd, Register rn, Register rm); + void sel(Register rd, Register rn, Register rm) { sel(al, rd, rn, rm); } + + void shadd16(Condition cond, Register rd, Register rn, Register rm); + void shadd16(Register rd, Register rn, Register rm) { + shadd16(al, rd, rn, rm); + } + + void shadd8(Condition cond, Register rd, Register rn, Register rm); + void shadd8(Register rd, Register rn, Register rm) { shadd8(al, rd, rn, rm); } + + void shasx(Condition cond, Register rd, Register rn, Register rm); + void shasx(Register rd, Register rn, Register rm) { shasx(al, rd, rn, rm); } + + void shsax(Condition cond, Register rd, Register rn, Register rm); + void shsax(Register rd, Register rn, Register rm) { shsax(al, rd, rn, rm); } + + void shsub16(Condition cond, Register rd, Register rn, Register rm); + void shsub16(Register rd, 
Register rn, Register rm) { + shsub16(al, rd, rn, rm); + } + + void shsub8(Condition cond, Register rd, Register rn, Register rm); + void shsub8(Register rd, Register rn, Register rm) { shsub8(al, rd, rn, rm); } + + void smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlabb(Register rd, Register rn, Register rm, Register ra) { + smlabb(al, rd, rn, rm, ra); + } + + void smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlabt(Register rd, Register rn, Register rm, Register ra) { + smlabt(al, rd, rn, rm, ra); + } + + void smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlad(Register rd, Register rn, Register rm, Register ra) { + smlad(al, rd, rn, rm, ra); + } + + void smladx( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smladx(Register rd, Register rn, Register rm, Register ra) { + smladx(al, rd, rn, rm, ra); + } + + void smlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlal(Register rdlo, Register rdhi, Register rn, Register rm) { + smlal(al, rdlo, rdhi, rn, rm); + } + + void smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlalbb(Register rdlo, Register rdhi, Register rn, Register rm) { + smlalbb(al, rdlo, rdhi, rn, rm); + } + + void smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlalbt(Register rdlo, Register rdhi, Register rn, Register rm) { + smlalbt(al, rdlo, rdhi, rn, rm); + } + + void smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlald(Register rdlo, Register rdhi, Register rn, Register rm) { + smlald(al, rdlo, rdhi, rn, rm); + } + + void smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlaldx(Register rdlo, Register rdhi, Register rn, Register rm) { + smlaldx(al, rdlo, rdhi, rn, rm); + } + + void 
smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlals(Register rdlo, Register rdhi, Register rn, Register rm) { + smlals(al, rdlo, rdhi, rn, rm); + } + + void smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlaltb(Register rdlo, Register rdhi, Register rn, Register rm) { + smlaltb(al, rdlo, rdhi, rn, rm); + } + + void smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlaltt(Register rdlo, Register rdhi, Register rn, Register rm) { + smlaltt(al, rdlo, rdhi, rn, rm); + } + + void smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlatb(Register rd, Register rn, Register rm, Register ra) { + smlatb(al, rd, rn, rm, ra); + } + + void smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlatt(Register rd, Register rn, Register rm, Register ra) { + smlatt(al, rd, rn, rm, ra); + } + + void smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlawb(Register rd, Register rn, Register rm, Register ra) { + smlawb(al, rd, rn, rm, ra); + } + + void smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlawt(Register rd, Register rn, Register rm, Register ra) { + smlawt(al, rd, rn, rm, ra); + } + + void smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlsd(Register rd, Register rn, Register rm, Register ra) { + smlsd(al, rd, rn, rm, ra); + } + + void smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smlsdx(Register rd, Register rn, Register rm, Register ra) { + smlsdx(al, rd, rn, rm, ra); + } + + void smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smlsld(Register rdlo, Register rdhi, Register rn, Register rm) { + smlsld(al, rdlo, rdhi, rn, rm); + } + + void smlsldx( + Condition cond, Register rdlo, 
Register rdhi, Register rn, Register rm); + void smlsldx(Register rdlo, Register rdhi, Register rn, Register rm) { + smlsldx(al, rdlo, rdhi, rn, rm); + } + + void smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmla(Register rd, Register rn, Register rm, Register ra) { + smmla(al, rd, rn, rm, ra); + } + + void smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmlar(Register rd, Register rn, Register rm, Register ra) { + smmlar(al, rd, rn, rm, ra); + } + + void smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmls(Register rd, Register rn, Register rm, Register ra) { + smmls(al, rd, rn, rm, ra); + } + + void smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void smmlsr(Register rd, Register rn, Register rm, Register ra) { + smmlsr(al, rd, rn, rm, ra); + } + + void smmul(Condition cond, Register rd, Register rn, Register rm); + void smmul(Register rd, Register rn, Register rm) { smmul(al, rd, rn, rm); } + + void smmulr(Condition cond, Register rd, Register rn, Register rm); + void smmulr(Register rd, Register rn, Register rm) { smmulr(al, rd, rn, rm); } + + void smuad(Condition cond, Register rd, Register rn, Register rm); + void smuad(Register rd, Register rn, Register rm) { smuad(al, rd, rn, rm); } + + void smuadx(Condition cond, Register rd, Register rn, Register rm); + void smuadx(Register rd, Register rn, Register rm) { smuadx(al, rd, rn, rm); } + + void smulbb(Condition cond, Register rd, Register rn, Register rm); + void smulbb(Register rd, Register rn, Register rm) { smulbb(al, rd, rn, rm); } + + void smulbt(Condition cond, Register rd, Register rn, Register rm); + void smulbt(Register rd, Register rn, Register rm) { smulbt(al, rd, rn, rm); } + + void smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smull(Register rdlo, Register rdhi, Register rn, Register rm) { + smull(al, rdlo, 
rdhi, rn, rm); + } + + void smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void smulls(Register rdlo, Register rdhi, Register rn, Register rm) { + smulls(al, rdlo, rdhi, rn, rm); + } + + void smultb(Condition cond, Register rd, Register rn, Register rm); + void smultb(Register rd, Register rn, Register rm) { smultb(al, rd, rn, rm); } + + void smultt(Condition cond, Register rd, Register rn, Register rm); + void smultt(Register rd, Register rn, Register rm) { smultt(al, rd, rn, rm); } + + void smulwb(Condition cond, Register rd, Register rn, Register rm); + void smulwb(Register rd, Register rn, Register rm) { smulwb(al, rd, rn, rm); } + + void smulwt(Condition cond, Register rd, Register rn, Register rm); + void smulwt(Register rd, Register rn, Register rm) { smulwt(al, rd, rn, rm); } + + void smusd(Condition cond, Register rd, Register rn, Register rm); + void smusd(Register rd, Register rn, Register rm) { smusd(al, rd, rn, rm); } + + void smusdx(Condition cond, Register rd, Register rn, Register rm); + void smusdx(Register rd, Register rn, Register rm) { smusdx(al, rd, rn, rm); } + + void ssat(Condition cond, Register rd, uint32_t imm, const Operand& operand); + void ssat(Register rd, uint32_t imm, const Operand& operand) { + ssat(al, rd, imm, operand); + } + + void ssat16(Condition cond, Register rd, uint32_t imm, Register rn); + void ssat16(Register rd, uint32_t imm, Register rn) { + ssat16(al, rd, imm, rn); + } + + void ssax(Condition cond, Register rd, Register rn, Register rm); + void ssax(Register rd, Register rn, Register rm) { ssax(al, rd, rn, rm); } + + void ssub16(Condition cond, Register rd, Register rn, Register rm); + void ssub16(Register rd, Register rn, Register rm) { ssub16(al, rd, rn, rm); } + + void ssub8(Condition cond, Register rd, Register rn, Register rm); + void ssub8(Register rd, Register rn, Register rm) { ssub8(al, rd, rn, rm); } + + void stl(Condition cond, Register rt, const MemOperand& operand); + 
void stl(Register rt, const MemOperand& operand) { stl(al, rt, operand); } + + void stlb(Condition cond, Register rt, const MemOperand& operand); + void stlb(Register rt, const MemOperand& operand) { stlb(al, rt, operand); } + + void stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void stlex(Register rd, Register rt, const MemOperand& operand) { + stlex(al, rd, rt, operand); + } + + void stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void stlexb(Register rd, Register rt, const MemOperand& operand) { + stlexb(al, rd, rt, operand); + } + + void stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + void stlexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + stlexd(al, rd, rt, rt2, operand); + } + + void stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void stlexh(Register rd, Register rt, const MemOperand& operand) { + stlexh(al, rd, rt, operand); + } + + void stlh(Condition cond, Register rt, const MemOperand& operand); + void stlh(Register rt, const MemOperand& operand) { stlh(al, rt, operand); } + + void stm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void stm(Register rn, WriteBack write_back, RegisterList registers) { + stm(al, Best, rn, write_back, registers); + } + void stm(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + stm(cond, Best, rn, write_back, registers); + } + void stm(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + stm(al, size, rn, write_back, registers); + } + + void stmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmda(Register rn, WriteBack write_back, RegisterList registers) { + stmda(al, rn, write_back, registers); + } + + void stmdb(Condition cond, + EncodingSize size, + Register 
rn, + WriteBack write_back, + RegisterList registers); + void stmdb(Register rn, WriteBack write_back, RegisterList registers) { + stmdb(al, Best, rn, write_back, registers); + } + void stmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmdb(cond, Best, rn, write_back, registers); + } + void stmdb(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmdb(al, size, rn, write_back, registers); + } + + void stmea(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmea(Register rn, WriteBack write_back, RegisterList registers) { + stmea(al, Best, rn, write_back, registers); + } + void stmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmea(cond, Best, rn, write_back, registers); + } + void stmea(EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + stmea(al, size, rn, write_back, registers); + } + + void stmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmed(Register rn, WriteBack write_back, RegisterList registers) { + stmed(al, rn, write_back, registers); + } + + void stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmfa(Register rn, WriteBack write_back, RegisterList registers) { + stmfa(al, rn, write_back, registers); + } + + void stmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmfd(Register rn, WriteBack write_back, RegisterList registers) { + stmfd(al, rn, write_back, registers); + } + + void stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + void stmib(Register rn, WriteBack write_back, RegisterList registers) { + stmib(al, rn, write_back, registers); + } + + void str(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void str(Register rt, const 
MemOperand& operand) { + str(al, Best, rt, operand); + } + void str(Condition cond, Register rt, const MemOperand& operand) { + str(cond, Best, rt, operand); + } + void str(EncodingSize size, Register rt, const MemOperand& operand) { + str(al, size, rt, operand); + } + + void strb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void strb(Register rt, const MemOperand& operand) { + strb(al, Best, rt, operand); + } + void strb(Condition cond, Register rt, const MemOperand& operand) { + strb(cond, Best, rt, operand); + } + void strb(EncodingSize size, Register rt, const MemOperand& operand) { + strb(al, size, rt, operand); + } + + void strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + void strd(Register rt, Register rt2, const MemOperand& operand) { + strd(al, rt, rt2, operand); + } + + void strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void strex(Register rd, Register rt, const MemOperand& operand) { + strex(al, rd, rt, operand); + } + + void strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void strexb(Register rd, Register rt, const MemOperand& operand) { + strexb(al, rd, rt, operand); + } + + void strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + void strexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + strexd(al, rd, rt, rt2, operand); + } + + void strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + void strexh(Register rd, Register rt, const MemOperand& operand) { + strexh(al, rd, rt, operand); + } + + void strh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + void strh(Register rt, const MemOperand& operand) { + strh(al, Best, rt, operand); + } + void strh(Condition cond, Register rt, const MemOperand& operand) { + strh(cond, Best, rt, operand); + } + void strh(EncodingSize 
size, Register rt, const MemOperand& operand) { + strh(al, size, rt, operand); + } + + void sub(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void sub(Register rd, Register rn, const Operand& operand) { + sub(al, Best, rd, rn, operand); + } + void sub(Condition cond, Register rd, Register rn, const Operand& operand) { + sub(cond, Best, rd, rn, operand); + } + void sub(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + sub(al, size, rd, rn, operand); + } + + void sub(Condition cond, Register rd, const Operand& operand); + void sub(Register rd, const Operand& operand) { sub(al, rd, operand); } + + void subs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + void subs(Register rd, Register rn, const Operand& operand) { + subs(al, Best, rd, rn, operand); + } + void subs(Condition cond, Register rd, Register rn, const Operand& operand) { + subs(cond, Best, rd, rn, operand); + } + void subs(EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + subs(al, size, rd, rn, operand); + } + + void subs(Register rd, const Operand& operand); + + void subw(Condition cond, Register rd, Register rn, const Operand& operand); + void subw(Register rd, Register rn, const Operand& operand) { + subw(al, rd, rn, operand); + } + + void svc(Condition cond, uint32_t imm); + void svc(uint32_t imm) { svc(al, imm); } + + void sxtab(Condition cond, Register rd, Register rn, const Operand& operand); + void sxtab(Register rd, Register rn, const Operand& operand) { + sxtab(al, rd, rn, operand); + } + + void sxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand); + void sxtab16(Register rd, Register rn, const Operand& operand) { + sxtab16(al, rd, rn, operand); + } + + void sxtah(Condition cond, Register rd, Register rn, const Operand& operand); + void sxtah(Register rd, Register rn, const Operand& operand) { + sxtah(al, rd, rn, 
operand); + } + + void sxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void sxtb(Register rd, const Operand& operand) { + sxtb(al, Best, rd, operand); + } + void sxtb(Condition cond, Register rd, const Operand& operand) { + sxtb(cond, Best, rd, operand); + } + void sxtb(EncodingSize size, Register rd, const Operand& operand) { + sxtb(al, size, rd, operand); + } + + void sxtb16(Condition cond, Register rd, const Operand& operand); + void sxtb16(Register rd, const Operand& operand) { sxtb16(al, rd, operand); } + + void sxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void sxth(Register rd, const Operand& operand) { + sxth(al, Best, rd, operand); + } + void sxth(Condition cond, Register rd, const Operand& operand) { + sxth(cond, Best, rd, operand); + } + void sxth(EncodingSize size, Register rd, const Operand& operand) { + sxth(al, size, rd, operand); + } + + void tbb(Condition cond, Register rn, Register rm); + void tbb(Register rn, Register rm) { tbb(al, rn, rm); } + + void tbh(Condition cond, Register rn, Register rm); + void tbh(Register rn, Register rm) { tbh(al, rn, rm); } + + void teq(Condition cond, Register rn, const Operand& operand); + void teq(Register rn, const Operand& operand) { teq(al, rn, operand); } + + void tst(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + void tst(Register rn, const Operand& operand) { tst(al, Best, rn, operand); } + void tst(Condition cond, Register rn, const Operand& operand) { + tst(cond, Best, rn, operand); + } + void tst(EncodingSize size, Register rn, const Operand& operand) { + tst(al, size, rn, operand); + } + + void uadd16(Condition cond, Register rd, Register rn, Register rm); + void uadd16(Register rd, Register rn, Register rm) { uadd16(al, rd, rn, rm); } + + void uadd8(Condition cond, Register rd, Register rn, Register rm); + void uadd8(Register rd, Register rn, Register rm) { uadd8(al, rd, rn, rm); } + + void 
uasx(Condition cond, Register rd, Register rn, Register rm); + void uasx(Register rd, Register rn, Register rm) { uasx(al, rd, rn, rm); } + + void ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width) { + ubfx(al, rd, rn, lsb, width); + } + + void udf(Condition cond, EncodingSize size, uint32_t imm); + void udf(uint32_t imm) { udf(al, Best, imm); } + void udf(Condition cond, uint32_t imm) { udf(cond, Best, imm); } + void udf(EncodingSize size, uint32_t imm) { udf(al, size, imm); } + + void udiv(Condition cond, Register rd, Register rn, Register rm); + void udiv(Register rd, Register rn, Register rm) { udiv(al, rd, rn, rm); } + + void uhadd16(Condition cond, Register rd, Register rn, Register rm); + void uhadd16(Register rd, Register rn, Register rm) { + uhadd16(al, rd, rn, rm); + } + + void uhadd8(Condition cond, Register rd, Register rn, Register rm); + void uhadd8(Register rd, Register rn, Register rm) { uhadd8(al, rd, rn, rm); } + + void uhasx(Condition cond, Register rd, Register rn, Register rm); + void uhasx(Register rd, Register rn, Register rm) { uhasx(al, rd, rn, rm); } + + void uhsax(Condition cond, Register rd, Register rn, Register rm); + void uhsax(Register rd, Register rn, Register rm) { uhsax(al, rd, rn, rm); } + + void uhsub16(Condition cond, Register rd, Register rn, Register rm); + void uhsub16(Register rd, Register rn, Register rm) { + uhsub16(al, rd, rn, rm); + } + + void uhsub8(Condition cond, Register rd, Register rn, Register rm); + void uhsub8(Register rd, Register rn, Register rm) { uhsub8(al, rd, rn, rm); } + + void umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umaal(Register rdlo, Register rdhi, Register rn, Register rm) { + umaal(al, rdlo, rdhi, rn, rm); + } + + void umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umlal(Register rdlo, Register rdhi, Register rn, 
Register rm) { + umlal(al, rdlo, rdhi, rn, rm); + } + + void umlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umlals(Register rdlo, Register rdhi, Register rn, Register rm) { + umlals(al, rdlo, rdhi, rn, rm); + } + + void umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umull(Register rdlo, Register rdhi, Register rn, Register rm) { + umull(al, rdlo, rdhi, rn, rm); + } + + void umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + void umulls(Register rdlo, Register rdhi, Register rn, Register rm) { + umulls(al, rdlo, rdhi, rn, rm); + } + + void uqadd16(Condition cond, Register rd, Register rn, Register rm); + void uqadd16(Register rd, Register rn, Register rm) { + uqadd16(al, rd, rn, rm); + } + + void uqadd8(Condition cond, Register rd, Register rn, Register rm); + void uqadd8(Register rd, Register rn, Register rm) { uqadd8(al, rd, rn, rm); } + + void uqasx(Condition cond, Register rd, Register rn, Register rm); + void uqasx(Register rd, Register rn, Register rm) { uqasx(al, rd, rn, rm); } + + void uqsax(Condition cond, Register rd, Register rn, Register rm); + void uqsax(Register rd, Register rn, Register rm) { uqsax(al, rd, rn, rm); } + + void uqsub16(Condition cond, Register rd, Register rn, Register rm); + void uqsub16(Register rd, Register rn, Register rm) { + uqsub16(al, rd, rn, rm); + } + + void uqsub8(Condition cond, Register rd, Register rn, Register rm); + void uqsub8(Register rd, Register rn, Register rm) { uqsub8(al, rd, rn, rm); } + + void usad8(Condition cond, Register rd, Register rn, Register rm); + void usad8(Register rd, Register rn, Register rm) { usad8(al, rd, rn, rm); } + + void usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra); + void usada8(Register rd, Register rn, Register rm, Register ra) { + usada8(al, rd, rn, rm, ra); + } + + void usat(Condition cond, Register rd, uint32_t imm, const Operand& 
operand); + void usat(Register rd, uint32_t imm, const Operand& operand) { + usat(al, rd, imm, operand); + } + + void usat16(Condition cond, Register rd, uint32_t imm, Register rn); + void usat16(Register rd, uint32_t imm, Register rn) { + usat16(al, rd, imm, rn); + } + + void usax(Condition cond, Register rd, Register rn, Register rm); + void usax(Register rd, Register rn, Register rm) { usax(al, rd, rn, rm); } + + void usub16(Condition cond, Register rd, Register rn, Register rm); + void usub16(Register rd, Register rn, Register rm) { usub16(al, rd, rn, rm); } + + void usub8(Condition cond, Register rd, Register rn, Register rm); + void usub8(Register rd, Register rn, Register rm) { usub8(al, rd, rn, rm); } + + void uxtab(Condition cond, Register rd, Register rn, const Operand& operand); + void uxtab(Register rd, Register rn, const Operand& operand) { + uxtab(al, rd, rn, operand); + } + + void uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand); + void uxtab16(Register rd, Register rn, const Operand& operand) { + uxtab16(al, rd, rn, operand); + } + + void uxtah(Condition cond, Register rd, Register rn, const Operand& operand); + void uxtah(Register rd, Register rn, const Operand& operand) { + uxtah(al, rd, rn, operand); + } + + void uxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void uxtb(Register rd, const Operand& operand) { + uxtb(al, Best, rd, operand); + } + void uxtb(Condition cond, Register rd, const Operand& operand) { + uxtb(cond, Best, rd, operand); + } + void uxtb(EncodingSize size, Register rd, const Operand& operand) { + uxtb(al, size, rd, operand); + } + + void uxtb16(Condition cond, Register rd, const Operand& operand); + void uxtb16(Register rd, const Operand& operand) { uxtb16(al, rd, operand); } + + void uxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + void uxth(Register rd, const Operand& operand) { + uxth(al, Best, rd, operand); + } + void 
uxth(Condition cond, Register rd, const Operand& operand) { + uxth(cond, Best, rd, operand); + } + void uxth(EncodingSize size, Register rd, const Operand& operand) { + uxth(al, size, rd, operand); + } + + void vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vaba(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vaba(al, dt, rd, rn, rm); + } + + void vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vaba(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vaba(al, dt, rd, rn, rm); + } + + void vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vabal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vabal(al, dt, rd, rn, rm); + } + + void vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vabd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vabd(al, dt, rd, rn, rm); + } + + void vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vabd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vabd(al, dt, rd, rn, rm); + } + + void vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vabdl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vabdl(al, dt, rd, rn, rm); + } + + void vabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vabs(DataType dt, DRegister rd, DRegister rm) { vabs(al, dt, rd, rm); } + + void vabs(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vabs(DataType dt, QRegister rd, QRegister rm) { vabs(al, dt, rd, rm); } + + void vabs(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vabs(DataType dt, SRegister rd, SRegister rm) { vabs(al, dt, rd, rm); } + + void vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vacge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vacge(al, dt, rd, rn, rm); + } + + 
  // NEON compare/add mnemonics. Convention throughout this section: each
  // instruction has a primary overload taking an explicit Condition, plus a
  // convenience overload that forwards with `al` (always execute).

  // VACGE, Q-register form (the D-register form precedes this chunk).
  void vacge(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vacge(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vacge(al, dt, rd, rn, rm);
  }

  // VACGT, D- and Q-register forms.
  void vacgt(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vacgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vacgt(al, dt, rd, rn, rm);
  }

  void vacgt(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vacgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vacgt(al, dt, rd, rn, rm);
  }

  // VACLE, D- and Q-register forms.
  void vacle(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vacle(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vacle(al, dt, rd, rn, rm);
  }

  void vacle(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vacle(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vacle(al, dt, rd, rn, rm);
  }

  // VACLT, D- and Q-register forms.
  void vaclt(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vaclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vaclt(al, dt, rd, rn, rm);
  }

  void vaclt(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vaclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vaclt(al, dt, rd, rn, rm);
  }

  // VADD, D-, Q- and S-register forms.
  void vadd(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vadd(al, dt, rd, rn, rm);
  }

  void vadd(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vadd(al, dt, rd, rn, rm);
  }

  void vadd(
      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
  void vadd(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
    vadd(al, dt, rd, rn, rm);
  }

  // VADDHN: Q-register sources into a D-register destination.
  void vaddhn(
      Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm);
  void vaddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) {
    vaddhn(al, dt, rd, rn, rm);
  }

  // VADDL: D-register sources into a Q-register destination.
  void vaddl(
      Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm);
  void vaddl(DataType dt, QRegister rd, DRegister rn, DRegister rm) {
    vaddl(al, dt, rd, rn, rm);
  }

  // VADDW: Q-register first source, D-register second source.
  void vaddw(
      Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm);
  void vaddw(DataType dt, QRegister rd, QRegister rn, DRegister rm) {
    vaddw(al, dt, rd, rn, rm);
  }

  // VAND: last parameter is a generic D/QOperand rather than a plain
  // register.
  void vand(Condition cond,
            DataType dt,
            DRegister rd,
            DRegister rn,
            const DOperand& operand);
  void vand(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) {
    vand(al, dt, rd, rn, operand);
  }

  void vand(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rn,
            const QOperand& operand);
  void vand(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) {
    vand(al, dt, rd, rn, operand);
  }

  // VBIC: operand shapes mirror VAND.
  void vbic(Condition cond,
            DataType dt,
            DRegister rd,
            DRegister rn,
            const DOperand& operand);
  void vbic(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) {
    vbic(al, dt, rd, rn, operand);
  }

  void vbic(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rn,
            const QOperand& operand);
  void vbic(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) {
    vbic(al, dt, rd, rn, operand);
  }

  // VBIF: additionally offers DataType-less overloads which forward
  // kDataTypeValueNone.
  void vbif(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vbif(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vbif(al, dt, rd, rn, rm);
  }
  void vbif(DRegister rd, DRegister rn, DRegister rm) {
    vbif(al, kDataTypeValueNone, rd, rn, rm);
  }
  void vbif(Condition cond, DRegister rd, DRegister rn, DRegister rm) {
    vbif(cond, kDataTypeValueNone, rd, rn, rm);
  }

  void vbif(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vbif(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vbif(al, dt, rd, rn, rm);
  }
+ void vbif(QRegister rd, QRegister rn, QRegister rm) { + vbif(al, kDataTypeValueNone, rd, rn, rm); + } + void vbif(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + vbif(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vbit(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vbit(al, dt, rd, rn, rm); + } + void vbit(DRegister rd, DRegister rn, DRegister rm) { + vbit(al, kDataTypeValueNone, rd, rn, rm); + } + void vbit(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + vbit(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vbit(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vbit(al, dt, rd, rn, rm); + } + void vbit(QRegister rd, QRegister rn, QRegister rm) { + vbit(al, kDataTypeValueNone, rd, rn, rm); + } + void vbit(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + vbit(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vbsl(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vbsl(al, dt, rd, rn, rm); + } + void vbsl(DRegister rd, DRegister rn, DRegister rm) { + vbsl(al, kDataTypeValueNone, rd, rn, rm); + } + void vbsl(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + vbsl(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vbsl(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vbsl(al, dt, rd, rn, rm); + } + void vbsl(QRegister rd, QRegister rn, QRegister rm) { + vbsl(al, kDataTypeValueNone, rd, rn, rm); + } + void vbsl(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + vbsl(cond, kDataTypeValueNone, rd, rn, rm); + } + + void vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + 
  // NEON/VFP compare, convert, duplicate and multiply mnemonics. As above,
  // every conditional instruction pairs a Condition-taking primary overload
  // with a convenience overload forwarding `al` (always execute).

  // VCEQ continued: operand-comparand forms and register-register forms,
  // D and Q variants.
  void vceq(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
    vceq(al, dt, rd, rm, operand);
  }

  void vceq(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rm,
            const QOperand& operand);
  void vceq(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
    vceq(al, dt, rd, rm, operand);
  }

  void vceq(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vceq(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vceq(al, dt, rd, rn, rm);
  }

  void vceq(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vceq(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vceq(al, dt, rd, rn, rm);
  }

  // VCGE: same four shapes as VCEQ.
  void vcge(Condition cond,
            DataType dt,
            DRegister rd,
            DRegister rm,
            const DOperand& operand);
  void vcge(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
    vcge(al, dt, rd, rm, operand);
  }

  void vcge(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rm,
            const QOperand& operand);
  void vcge(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
    vcge(al, dt, rd, rm, operand);
  }

  void vcge(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vcge(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vcge(al, dt, rd, rn, rm);
  }

  void vcge(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vcge(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vcge(al, dt, rd, rn, rm);
  }

  // VCGT: same four shapes.
  void vcgt(Condition cond,
            DataType dt,
            DRegister rd,
            DRegister rm,
            const DOperand& operand);
  void vcgt(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
    vcgt(al, dt, rd, rm, operand);
  }

  void vcgt(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rm,
            const QOperand& operand);
  void vcgt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
    vcgt(al, dt, rd, rm, operand);
  }

  void vcgt(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vcgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vcgt(al, dt, rd, rn, rm);
  }

  void vcgt(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vcgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vcgt(al, dt, rd, rn, rm);
  }

  // VCLE: same four shapes.
  void vcle(Condition cond,
            DataType dt,
            DRegister rd,
            DRegister rm,
            const DOperand& operand);
  void vcle(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
    vcle(al, dt, rd, rm, operand);
  }

  void vcle(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rm,
            const QOperand& operand);
  void vcle(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
    vcle(al, dt, rd, rm, operand);
  }

  void vcle(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vcle(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vcle(al, dt, rd, rn, rm);
  }

  void vcle(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vcle(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vcle(al, dt, rd, rn, rm);
  }

  // VCLS: two-operand form, D and Q.
  void vcls(Condition cond, DataType dt, DRegister rd, DRegister rm);
  void vcls(DataType dt, DRegister rd, DRegister rm) { vcls(al, dt, rd, rm); }

  void vcls(Condition cond, DataType dt, QRegister rd, QRegister rm);
  void vcls(DataType dt, QRegister rd, QRegister rm) { vcls(al, dt, rd, rm); }

  // VCLT: same four shapes as the other compares.
  void vclt(Condition cond,
            DataType dt,
            DRegister rd,
            DRegister rm,
            const DOperand& operand);
  void vclt(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) {
    vclt(al, dt, rd, rm, operand);
  }

  void vclt(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rm,
            const QOperand& operand);
  void vclt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) {
    vclt(al, dt, rd, rm, operand);
  }

  void vclt(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vclt(al, dt, rd, rn, rm);
  }

  void vclt(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vclt(al, dt, rd, rn, rm);
  }

  // VCLZ: two-operand form, D and Q.
  void vclz(Condition cond, DataType dt, DRegister rd, DRegister rm);
  void vclz(DataType dt, DRegister rd, DRegister rm) { vclz(al, dt, rd, rm); }

  void vclz(Condition cond, DataType dt, QRegister rd, QRegister rm);
  void vclz(DataType dt, QRegister rd, QRegister rm) { vclz(al, dt, rd, rm); }

  // VCMP / VCMPE: S- and D-register forms, comparand given as an S/DOperand.
  void vcmp(Condition cond, DataType dt, SRegister rd, const SOperand& operand);
  void vcmp(DataType dt, SRegister rd, const SOperand& operand) {
    vcmp(al, dt, rd, operand);
  }

  void vcmp(Condition cond, DataType dt, DRegister rd, const DOperand& operand);
  void vcmp(DataType dt, DRegister rd, const DOperand& operand) {
    vcmp(al, dt, rd, operand);
  }

  void vcmpe(Condition cond,
             DataType dt,
             SRegister rd,
             const SOperand& operand);
  void vcmpe(DataType dt, SRegister rd, const SOperand& operand) {
    vcmpe(al, dt, rd, operand);
  }

  void vcmpe(Condition cond,
             DataType dt,
             DRegister rd,
             const DOperand& operand);
  void vcmpe(DataType dt, DRegister rd, const DOperand& operand) {
    vcmpe(al, dt, rd, operand);
  }

  // VCNT: two-operand form, D and Q.
  void vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm);
  void vcnt(DataType dt, DRegister rd, DRegister rm) { vcnt(al, dt, rd, rm); }

  void vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm);
  void vcnt(DataType dt, QRegister rd, QRegister rm) { vcnt(al, dt, rd, rm); }

  // VCVT: takes two DataType parameters (dt1, dt2) - presumably destination
  // and source element types; confirm against the VIXL/ARM documentation.
  // Register combinations cover S<->D conversion, fixed-point forms taking
  // an fbits count, and same-class D/Q/S forms.
  void vcvt(
      Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm);
  void vcvt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
    vcvt(al, dt1, dt2, rd, rm);
  }

  void vcvt(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
  void vcvt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
    vcvt(al, dt1, dt2, rd, rm);
  }

  void vcvt(Condition cond,
            DataType dt1,
            DataType dt2,
            DRegister rd,
            DRegister rm,
            int32_t fbits);
  void vcvt(
      DataType dt1, DataType dt2, DRegister rd, DRegister rm, int32_t fbits) {
    vcvt(al, dt1, dt2, rd, rm, fbits);
  }

  void vcvt(Condition cond,
            DataType dt1,
            DataType dt2,
            QRegister rd,
            QRegister rm,
            int32_t fbits);
  void vcvt(
      DataType dt1, DataType dt2, QRegister rd, QRegister rm, int32_t fbits) {
    vcvt(al, dt1, dt2, rd, rm, fbits);
  }

  void vcvt(Condition cond,
            DataType dt1,
            DataType dt2,
            SRegister rd,
            SRegister rm,
            int32_t fbits);
  void vcvt(
      DataType dt1, DataType dt2, SRegister rd, SRegister rm, int32_t fbits) {
    vcvt(al, dt1, dt2, rd, rm, fbits);
  }

  void vcvt(
      Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm);
  void vcvt(DataType dt1, DataType dt2, DRegister rd, DRegister rm) {
    vcvt(al, dt1, dt2, rd, rm);
  }

  void vcvt(
      Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm);
  void vcvt(DataType dt1, DataType dt2, QRegister rd, QRegister rm) {
    vcvt(al, dt1, dt2, rd, rm);
  }

  void vcvt(
      Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm);
  void vcvt(DataType dt1, DataType dt2, DRegister rd, QRegister rm) {
    vcvt(al, dt1, dt2, rd, rm);
  }

  void vcvt(
      Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm);
  void vcvt(DataType dt1, DataType dt2, QRegister rd, DRegister rm) {
    vcvt(al, dt1, dt2, rd, rm);
  }

  void vcvt(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
  void vcvt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
    vcvt(al, dt1, dt2, rd, rm);
  }

  // VCVTA: no Condition parameter and no forwarding overloads - declarations
  // only.
  void vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm);

  void vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm);

  void vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm);

  void vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm);

  // VCVTB: conditional, with al-forwarding overloads.
  void vcvtb(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
  void vcvtb(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
    vcvtb(al, dt1, dt2, rd, rm);
  }

  void vcvtb(
      Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm);
  void vcvtb(DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
    vcvtb(al, dt1, dt2, rd, rm);
  }

  void vcvtb(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
  void vcvtb(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
    vcvtb(al, dt1, dt2, rd, rm);
  }

  // VCVTM / VCVTN / VCVTP: unconditional, declarations only (like VCVTA).
  void vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm);

  void vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm);

  void vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm);

  void vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm);

  void vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm);

  void vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm);

  void vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm);

  void vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm);

  void vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm);

  void vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm);

  void vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm);

  void vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm);

  // VCVTR: conditional, with al-forwarding overloads.
  void vcvtr(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
  void vcvtr(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
    vcvtr(al, dt1, dt2, rd, rm);
  }

  void vcvtr(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
  void vcvtr(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
    vcvtr(al, dt1, dt2, rd, rm);
  }

  // VCVTT: conditional, with al-forwarding overloads.
  void vcvtt(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm);
  void vcvtt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) {
    vcvtt(al, dt1, dt2, rd, rm);
  }

  void vcvtt(
      Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm);
  void vcvtt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) {
    vcvtt(al, dt1, dt2, rd, rm);
  }

  void vcvtt(
      Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm);
  void vcvtt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) {
    vcvtt(al, dt1, dt2, rd, rm);
  }

  // VDIV: S- and D-register forms.
  void vdiv(
      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
  void vdiv(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
    vdiv(al, dt, rd, rn, rm);
  }

  void vdiv(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vdiv(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vdiv(al, dt, rd, rn, rm);
  }

  // VDUP: source is either a core register (rt) or a D-register lane (rm).
  void vdup(Condition cond, DataType dt, QRegister rd, Register rt);
  void vdup(DataType dt, QRegister rd, Register rt) { vdup(al, dt, rd, rt); }

  void vdup(Condition cond, DataType dt, DRegister rd, Register rt);
  void vdup(DataType dt, DRegister rd, Register rt) { vdup(al, dt, rd, rt); }

  void vdup(Condition cond, DataType dt, DRegister rd, DRegisterLane rm);
  void vdup(DataType dt, DRegister rd, DRegisterLane rm) {
    vdup(al, dt, rd, rm);
  }

  void vdup(Condition cond, DataType dt, QRegister rd, DRegisterLane rm);
  void vdup(DataType dt, QRegister rd, DRegisterLane rm) {
    vdup(al, dt, rd, rm);
  }

  // VEOR: DataType-less overloads forward kDataTypeValueNone, matching the
  // other bitwise instructions (vbif/vbit/vbsl).
  void veor(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void veor(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    veor(al, dt, rd, rn, rm);
  }
  void veor(DRegister rd, DRegister rn, DRegister rm) {
    veor(al, kDataTypeValueNone, rd, rn, rm);
  }
  void veor(Condition cond, DRegister rd, DRegister rn, DRegister rm) {
    veor(cond, kDataTypeValueNone, rd, rn, rm);
  }

  void veor(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void veor(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    veor(al, dt, rd, rn, rm);
  }
  void veor(QRegister rd, QRegister rn, QRegister rm) {
    veor(al, kDataTypeValueNone, rd, rn, rm);
  }
  void veor(Condition cond, QRegister rd, QRegister rn, QRegister rm) {
    veor(cond, kDataTypeValueNone, rd, rn, rm);
  }

  // VEXT: takes an extra trailing D/QOperand after the register sources.
  void vext(Condition cond,
            DataType dt,
            DRegister rd,
            DRegister rn,
            DRegister rm,
            const DOperand& operand);
  void vext(DataType dt,
            DRegister rd,
            DRegister rn,
            DRegister rm,
            const DOperand& operand) {
    vext(al, dt, rd, rn, rm, operand);
  }

  void vext(Condition cond,
            DataType dt,
            QRegister rd,
            QRegister rn,
            QRegister rm,
            const QOperand& operand);
  void vext(DataType dt,
            QRegister rd,
            QRegister rn,
            QRegister rm,
            const QOperand& operand) {
    vext(al, dt, rd, rn, rm, operand);
  }

  // VFMA / VFMS: D-, Q- and S-register forms.
  void vfma(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vfma(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vfma(al, dt, rd, rn, rm);
  }

  void vfma(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vfma(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vfma(al, dt, rd, rn, rm);
  }

  void vfma(
      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
  void vfma(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
    vfma(al, dt, rd, rn, rm);
  }

  void vfms(
      Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm);
  void vfms(DataType dt, DRegister rd, DRegister rn, DRegister rm) {
    vfms(al, dt, rd, rn, rm);
  }

  void vfms(
      Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm);
  void vfms(DataType dt, QRegister rd, QRegister rn, QRegister rm) {
    vfms(al, dt, rd, rn, rm);
  }

  void vfms(
      Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm);
  void vfms(DataType dt, SRegister rd, SRegister rn, SRegister rm) {
    vfms(al, dt, rd, rn, rm);
  }
+ + void vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vfnma(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vfnma(al, dt, rd, rn, rm); + } + + void vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vfnma(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vfnma(al, dt, rd, rn, rm); + } + + void vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vfnms(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vfnms(al, dt, rd, rn, rm); + } + + void vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vfnms(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vfnms(al, dt, rd, rn, rm); + } + + void vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vhadd(al, dt, rd, rn, rm); + } + + void vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vhadd(al, dt, rd, rn, rm); + } + + void vhsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vhsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vhsub(al, dt, rd, rn, rm); + } + + void vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vhsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vhsub(al, dt, rd, rn, rm); + } + + void vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vld1(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vld1(al, dt, nreglist, operand); + } + + void vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vld2(DataType dt, + const NeonRegisterList& nreglist, + const 
AlignedMemOperand& operand) { + vld2(al, dt, nreglist, operand); + } + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vld3(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vld3(al, dt, nreglist, operand); + } + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + void vld3(DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + vld3(al, dt, nreglist, operand); + } + + void vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vld4(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vld4(al, dt, nreglist, operand); + } + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vldm(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldm(al, dt, rn, write_back, dreglist); + } + void vldm(Register rn, WriteBack write_back, DRegisterList dreglist) { + vldm(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vldm(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldm(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vldm(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldm(al, dt, rn, write_back, sreglist); + } + void vldm(Register rn, WriteBack write_back, SRegisterList sreglist) { + vldm(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vldm(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldm(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + 
DRegisterList dreglist); + void vldmdb(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldmdb(al, dt, rn, write_back, dreglist); + } + void vldmdb(Register rn, WriteBack write_back, DRegisterList dreglist) { + vldmdb(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vldmdb(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldmdb(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vldmdb(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmdb(al, dt, rn, write_back, sreglist); + } + void vldmdb(Register rn, WriteBack write_back, SRegisterList sreglist) { + vldmdb(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vldmdb(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmdb(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vldmia(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldmia(al, dt, rn, write_back, dreglist); + } + void vldmia(Register rn, WriteBack write_back, DRegisterList dreglist) { + vldmia(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vldmia(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vldmia(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vldmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmia(al, dt, rn, write_back, sreglist); + } + void vldmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + vldmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void 
vldmia(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vldmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vldr(Condition cond, DataType dt, DRegister rd, Location* location); + bool vldr_info(Condition cond, + DataType dt, + DRegister rd, + Location* location, + const struct ReferenceInfo** info); + void vldr(DataType dt, DRegister rd, Location* location) { + vldr(al, dt, rd, location); + } + void vldr(DRegister rd, Location* location) { + vldr(al, Untyped64, rd, location); + } + void vldr(Condition cond, DRegister rd, Location* location) { + vldr(cond, Untyped64, rd, location); + } + + void vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + void vldr(DataType dt, DRegister rd, const MemOperand& operand) { + vldr(al, dt, rd, operand); + } + void vldr(DRegister rd, const MemOperand& operand) { + vldr(al, Untyped64, rd, operand); + } + void vldr(Condition cond, DRegister rd, const MemOperand& operand) { + vldr(cond, Untyped64, rd, operand); + } + + void vldr(Condition cond, DataType dt, SRegister rd, Location* location); + bool vldr_info(Condition cond, + DataType dt, + SRegister rd, + Location* location, + const struct ReferenceInfo** info); + void vldr(DataType dt, SRegister rd, Location* location) { + vldr(al, dt, rd, location); + } + void vldr(SRegister rd, Location* location) { + vldr(al, Untyped32, rd, location); + } + void vldr(Condition cond, SRegister rd, Location* location) { + vldr(cond, Untyped32, rd, location); + } + + void vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + void vldr(DataType dt, SRegister rd, const MemOperand& operand) { + vldr(al, dt, rd, operand); + } + void vldr(SRegister rd, const MemOperand& operand) { + vldr(al, Untyped32, rd, operand); + } + void vldr(Condition cond, SRegister rd, const MemOperand& operand) { + vldr(cond, Untyped32, rd, operand); + } + + void vmax( + Condition cond, DataType dt, DRegister 
rd, DRegister rn, DRegister rm); + void vmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmax(al, dt, rd, rn, rm); + } + + void vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmax(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmax(al, dt, rd, rn, rm); + } + + void vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmin(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmin(al, dt, rd, rn, rm); + } + + void vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmin(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmin(al, dt, rd, rn, rm); + } + + void vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmla(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vmla(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vmla(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmla(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, 
SRegister rm); + void vmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vmla(al, dt, rd, rn, rm); + } + + void vmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + void vmlal(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + vmlal(al, dt, rd, rn, rm); + } + + void vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vmlal(al, dt, rd, rn, rm); + } + + void vmls(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vmls(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vmls(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmls(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vmls(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vmls(al, dt, rd, rn, rm); + } + + void vmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + void vmlsl(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + vmlsl(al, dt, rd, rn, rm); + } + + void vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vmlsl(al, dt, rd, rn, rm); + } + + void vmov(Condition cond, Register rt, SRegister rn); + void vmov(Register rt, SRegister rn) { vmov(al, 
rt, rn); } + + void vmov(Condition cond, SRegister rn, Register rt); + void vmov(SRegister rn, Register rt) { vmov(al, rn, rt); } + + void vmov(Condition cond, Register rt, Register rt2, DRegister rm); + void vmov(Register rt, Register rt2, DRegister rm) { vmov(al, rt, rt2, rm); } + + void vmov(Condition cond, DRegister rm, Register rt, Register rt2); + void vmov(DRegister rm, Register rt, Register rt2) { vmov(al, rm, rt, rt2); } + + void vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1); + void vmov(Register rt, Register rt2, SRegister rm, SRegister rm1) { + vmov(al, rt, rt2, rm, rm1); + } + + void vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2); + void vmov(SRegister rm, SRegister rm1, Register rt, Register rt2) { + vmov(al, rm, rm1, rt, rt2); + } + + void vmov(Condition cond, DataType dt, DRegisterLane rd, Register rt); + void vmov(DataType dt, DRegisterLane rd, Register rt) { + vmov(al, dt, rd, rt); + } + void vmov(DRegisterLane rd, Register rt) { + vmov(al, kDataTypeValueNone, rd, rt); + } + void vmov(Condition cond, DRegisterLane rd, Register rt) { + vmov(cond, kDataTypeValueNone, rd, rt); + } + + void vmov(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + void vmov(DataType dt, DRegister rd, const DOperand& operand) { + vmov(al, dt, rd, operand); + } + + void vmov(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + void vmov(DataType dt, QRegister rd, const QOperand& operand) { + vmov(al, dt, rd, operand); + } + + void vmov(Condition cond, DataType dt, SRegister rd, const SOperand& operand); + void vmov(DataType dt, SRegister rd, const SOperand& operand) { + vmov(al, dt, rd, operand); + } + + void vmov(Condition cond, DataType dt, Register rt, DRegisterLane rn); + void vmov(DataType dt, Register rt, DRegisterLane rn) { + vmov(al, dt, rt, rn); + } + void vmov(Register rt, DRegisterLane rn) { + vmov(al, kDataTypeValueNone, rt, rn); + } + void vmov(Condition 
cond, Register rt, DRegisterLane rn) { + vmov(cond, kDataTypeValueNone, rt, rn); + } + + void vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm); + void vmovl(DataType dt, QRegister rd, DRegister rm) { vmovl(al, dt, rd, rm); } + + void vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + void vmovn(DataType dt, DRegister rd, QRegister rm) { vmovn(al, dt, rd, rm); } + + void vmrs(Condition cond, RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg); + void vmrs(RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg) { + vmrs(al, rt, spec_reg); + } + + void vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt); + void vmsr(SpecialFPRegister spec_reg, Register rt) { vmsr(al, spec_reg, rt); } + + void vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vmul( + DataType dt, DRegister rd, DRegister rn, DRegister dm, unsigned index) { + vmul(al, dt, rd, rn, dm, index); + } + + void vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index); + void vmul( + DataType dt, QRegister rd, QRegister rn, DRegister dm, unsigned index) { + vmul(al, dt, rd, rn, dm, index); + } + + void vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vmul(al, dt, rd, rn, rm); + } + + void vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vmul(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vmul(al, dt, rd, rn, rm); + } + + void vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vmul(al, dt, rd, rn, rm); + } + + void vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vmull( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + vmull(al, dt, rd, rn, 
dm, index); + } + + void vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vmull(al, dt, rd, rn, rm); + } + + void vmvn(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + void vmvn(DataType dt, DRegister rd, const DOperand& operand) { + vmvn(al, dt, rd, operand); + } + + void vmvn(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + void vmvn(DataType dt, QRegister rd, const QOperand& operand) { + vmvn(al, dt, rd, operand); + } + + void vneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vneg(DataType dt, DRegister rd, DRegister rm) { vneg(al, dt, rd, rm); } + + void vneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vneg(DataType dt, QRegister rd, QRegister rm) { vneg(al, dt, rd, rm); } + + void vneg(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vneg(DataType dt, SRegister rd, SRegister rm) { vneg(al, dt, rd, rm); } + + void vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vnmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vnmla(al, dt, rd, rn, rm); + } + + void vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vnmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vnmla(al, dt, rd, rn, rm); + } + + void vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vnmls(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vnmls(al, dt, rd, rn, rm); + } + + void vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vnmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vnmls(al, dt, rd, rn, rm); + } + + void vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vnmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vnmul(al, dt, rd, rn, rm); + } + + void 
vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vnmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vnmul(al, dt, rd, rn, rm); + } + + void vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + void vorn(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + vorn(al, dt, rd, rn, operand); + } + + void vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + void vorn(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + vorn(al, dt, rd, rn, operand); + } + + void vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + void vorr(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + vorr(al, dt, rd, rn, operand); + } + void vorr(DRegister rd, DRegister rn, const DOperand& operand) { + vorr(al, kDataTypeValueNone, rd, rn, operand); + } + void vorr(Condition cond, + DRegister rd, + DRegister rn, + const DOperand& operand) { + vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + + void vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + void vorr(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + vorr(al, dt, rd, rn, operand); + } + void vorr(QRegister rd, QRegister rn, const QOperand& operand) { + vorr(al, kDataTypeValueNone, rd, rn, operand); + } + void vorr(Condition cond, + QRegister rd, + QRegister rn, + const QOperand& operand) { + vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + + void vpadal(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vpadal(DataType dt, DRegister rd, DRegister rm) { + vpadal(al, dt, rd, rm); + } + + void vpadal(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vpadal(DataType dt, QRegister rd, QRegister rm) { + vpadal(al, dt, rd, rm); + } + + void vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, 
DRegister rm); + void vpadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vpadd(al, dt, rd, rn, rm); + } + + void vpaddl(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vpaddl(DataType dt, DRegister rd, DRegister rm) { + vpaddl(al, dt, rd, rm); + } + + void vpaddl(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vpaddl(DataType dt, QRegister rd, QRegister rm) { + vpaddl(al, dt, rd, rm); + } + + void vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vpmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vpmax(al, dt, rd, rn, rm); + } + + void vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vpmin(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vpmin(al, dt, rd, rn, rm); + } + + void vpop(Condition cond, DataType dt, DRegisterList dreglist); + void vpop(DataType dt, DRegisterList dreglist) { vpop(al, dt, dreglist); } + void vpop(DRegisterList dreglist) { vpop(al, kDataTypeValueNone, dreglist); } + void vpop(Condition cond, DRegisterList dreglist) { + vpop(cond, kDataTypeValueNone, dreglist); + } + + void vpop(Condition cond, DataType dt, SRegisterList sreglist); + void vpop(DataType dt, SRegisterList sreglist) { vpop(al, dt, sreglist); } + void vpop(SRegisterList sreglist) { vpop(al, kDataTypeValueNone, sreglist); } + void vpop(Condition cond, SRegisterList sreglist) { + vpop(cond, kDataTypeValueNone, sreglist); + } + + void vpush(Condition cond, DataType dt, DRegisterList dreglist); + void vpush(DataType dt, DRegisterList dreglist) { vpush(al, dt, dreglist); } + void vpush(DRegisterList dreglist) { + vpush(al, kDataTypeValueNone, dreglist); + } + void vpush(Condition cond, DRegisterList dreglist) { + vpush(cond, kDataTypeValueNone, dreglist); + } + + void vpush(Condition cond, DataType dt, SRegisterList sreglist); + void vpush(DataType dt, SRegisterList sreglist) { vpush(al, dt, sreglist); } + void vpush(SRegisterList 
sreglist) { + vpush(al, kDataTypeValueNone, sreglist); + } + void vpush(Condition cond, SRegisterList sreglist) { + vpush(cond, kDataTypeValueNone, sreglist); + } + + void vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vqabs(DataType dt, DRegister rd, DRegister rm) { vqabs(al, dt, rd, rm); } + + void vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vqabs(DataType dt, QRegister rd, QRegister rm) { vqabs(al, dt, rd, rm); } + + void vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqadd(al, dt, rd, rn, rm); + } + + void vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vqadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vqadd(al, dt, rd, rn, rm); + } + + void vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vqdmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vqdmlal(al, dt, rd, rn, rm); + } + + void vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vqdmlal( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + vqdmlal(al, dt, rd, rn, dm, index); + } + + void vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vqdmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vqdmlsl(al, dt, rd, rn, rm); + } + + void vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + void vqdmlsl( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + vqdmlsl(al, dt, rd, rn, dm, index); + } + + void vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmulh( + Condition cond, DataType dt, 
QRegister rd, QRegister rn, QRegister rm); + void vqdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vqdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vqdmulh(al, dt, rd, rn, rm); + } + + void vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vqdmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vqdmull(al, dt, rd, rn, rm); + } + + void vqdmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + void vqdmull(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + vqdmull(al, dt, rd, rn, rm); + } + + void vqmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + void vqmovn(DataType dt, DRegister rd, QRegister rm) { + vqmovn(al, dt, rd, rm); + } + + void vqmovun(Condition cond, DataType dt, DRegister rd, QRegister rm); + void vqmovun(DataType dt, DRegister rd, QRegister rm) { + vqmovun(al, dt, rd, rm); + } + + void vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vqneg(DataType dt, DRegister rd, DRegister rm) { vqneg(al, dt, rd, rm); } + + void vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vqneg(DataType dt, QRegister rd, QRegister rm) { vqneg(al, dt, rd, rm); } + + void vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vqrdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + 
vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + void vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + void vqrdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + vqrdmulh(al, dt, rd, rn, rm); + } + + void vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + void vqrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + vqrshl(al, dt, rd, rm, rn); + } + + void vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn); + void vqrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + vqrshl(al, dt, rd, rm, rn); + } + + void vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqrshrn(al, dt, rd, rm, operand); + } + + void vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqrshrun(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqrshrun(al, dt, rd, rm, operand); + } + + void vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vqshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vqshl(al, dt, rd, rm, operand); + } + + void vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vqshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vqshl(al, dt, rd, rm, operand); + } + + void vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vqshlu(DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + vqshlu(al, dt, rd, 
rm, operand); + } + + void vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vqshlu(DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + vqshlu(al, dt, rd, rm, operand); + } + + void vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqshrn(al, dt, rd, rm, operand); + } + + void vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vqshrun(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vqshrun(al, dt, rd, rm, operand); + } + + void vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vqsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vqsub(al, dt, rd, rn, rm); + } + + void vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vqsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vqsub(al, dt, rd, rn, rm); + } + + void vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + void vraddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + vraddhn(al, dt, rd, rn, rm); + } + + void vrecpe(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrecpe(DataType dt, DRegister rd, DRegister rm) { + vrecpe(al, dt, rd, rm); + } + + void vrecpe(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrecpe(DataType dt, QRegister rd, QRegister rm) { + vrecpe(al, dt, rd, rm); + } + + void vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vrecps(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vrecps(al, dt, rd, rn, rm); + } + + void vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vrecps(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + 
vrecps(al, dt, rd, rn, rm); + } + + void vrev16(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrev16(DataType dt, DRegister rd, DRegister rm) { + vrev16(al, dt, rd, rm); + } + + void vrev16(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrev16(DataType dt, QRegister rd, QRegister rm) { + vrev16(al, dt, rd, rm); + } + + void vrev32(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrev32(DataType dt, DRegister rd, DRegister rm) { + vrev32(al, dt, rd, rm); + } + + void vrev32(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrev32(DataType dt, QRegister rd, QRegister rm) { + vrev32(al, dt, rd, rm); + } + + void vrev64(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrev64(DataType dt, DRegister rd, DRegister rm) { + vrev64(al, dt, rd, rm); + } + + void vrev64(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrev64(DataType dt, QRegister rd, QRegister rm) { + vrev64(al, dt, rd, rm); + } + + void vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vrhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vrhadd(al, dt, rd, rn, rm); + } + + void vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vrhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vrhadd(al, dt, rd, rn, rm); + } + + void vrinta(DataType dt, DRegister rd, DRegister rm); + + void vrinta(DataType dt, QRegister rd, QRegister rm); + + void vrinta(DataType dt, SRegister rd, SRegister rm); + + void vrintm(DataType dt, DRegister rd, DRegister rm); + + void vrintm(DataType dt, QRegister rd, QRegister rm); + + void vrintm(DataType dt, SRegister rd, SRegister rm); + + void vrintn(DataType dt, DRegister rd, DRegister rm); + + void vrintn(DataType dt, QRegister rd, QRegister rm); + + void vrintn(DataType dt, SRegister rd, SRegister rm); + + void vrintp(DataType dt, DRegister rd, DRegister rm); + + void vrintp(DataType dt, 
QRegister rd, QRegister rm); + + void vrintp(DataType dt, SRegister rd, SRegister rm); + + void vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vrintr(DataType dt, SRegister rd, SRegister rm) { + vrintr(al, dt, rd, rm); + } + + void vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrintr(DataType dt, DRegister rd, DRegister rm) { + vrintr(al, dt, rd, rm); + } + + void vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrintx(DataType dt, DRegister rd, DRegister rm) { + vrintx(al, dt, rd, rm); + } + + void vrintx(DataType dt, QRegister rd, QRegister rm); + + void vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vrintx(DataType dt, SRegister rd, SRegister rm) { + vrintx(al, dt, rd, rm); + } + + void vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrintz(DataType dt, DRegister rd, DRegister rm) { + vrintz(al, dt, rd, rm); + } + + void vrintz(DataType dt, QRegister rd, QRegister rm); + + void vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vrintz(DataType dt, SRegister rd, SRegister rm) { + vrintz(al, dt, rd, rm); + } + + void vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + void vrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + vrshl(al, dt, rd, rm, rn); + } + + void vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn); + void vrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + vrshl(al, dt, rd, rm, rn); + } + + void vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vrshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vrshr(al, dt, rd, rm, operand); + } + + void vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vrshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vrshr(al, dt, rd, rm, 
operand); + } + + void vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + vrshrn(al, dt, rd, rm, operand); + } + + void vrsqrte(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vrsqrte(DataType dt, DRegister rd, DRegister rm) { + vrsqrte(al, dt, rd, rm); + } + + void vrsqrte(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vrsqrte(DataType dt, QRegister rd, QRegister rm) { + vrsqrte(al, dt, rd, rm); + } + + void vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vrsqrts(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vrsqrts(al, dt, rd, rn, rm); + } + + void vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vrsqrts(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vrsqrts(al, dt, rd, rn, rm); + } + + void vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vrsra(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vrsra(al, dt, rd, rm, operand); + } + + void vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vrsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vrsra(al, dt, rd, rm, operand); + } + + void vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + void vrsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + vrsubhn(al, dt, rd, rn, rm); + } + + void vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void 
vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vshl(al, dt, rd, rm, operand); + } + + void vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vshl(al, dt, rd, rm, operand); + } + + void vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand); + void vshll(DataType dt, QRegister rd, DRegister rm, const DOperand& operand) { + vshll(al, dt, rd, rm, operand); + } + + void vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vshr(al, dt, rd, rm, operand); + } + + void vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vshr(al, dt, rd, rm, operand); + } + + void vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + void vshrn(DataType dt, DRegister rd, QRegister rm, const QOperand& operand) { + vshrn(al, dt, rd, rm, operand); + } + + void vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vsli(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vsli(al, dt, rd, rm, operand); + } + + void vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vsli(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vsli(al, dt, rd, rm, operand); + } + + void 
vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm); + void vsqrt(DataType dt, SRegister rd, SRegister rm) { vsqrt(al, dt, rd, rm); } + + void vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vsqrt(DataType dt, DRegister rd, DRegister rm) { vsqrt(al, dt, rd, rm); } + + void vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vsra(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vsra(al, dt, rd, rm, operand); + } + + void vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vsra(al, dt, rd, rm, operand); + } + + void vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + void vsri(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + vsri(al, dt, rd, rm, operand); + } + + void vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + void vsri(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + vsri(al, dt, rd, rm, operand); + } + + void vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst1(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst1(al, dt, nreglist, operand); + } + + void vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst2(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst2(al, dt, nreglist, operand); + } + + void vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst3(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst3(al, dt, nreglist, operand); + } + + void vst3(Condition cond, + DataType dt, 
+ const NeonRegisterList& nreglist, + const MemOperand& operand); + void vst3(DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + vst3(al, dt, nreglist, operand); + } + + void vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + void vst4(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + vst4(al, dt, nreglist, operand); + } + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vstm(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstm(al, dt, rn, write_back, dreglist); + } + void vstm(Register rn, WriteBack write_back, DRegisterList dreglist) { + vstm(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vstm(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstm(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vstm(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstm(al, dt, rn, write_back, sreglist); + } + void vstm(Register rn, WriteBack write_back, SRegisterList sreglist) { + vstm(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vstm(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstm(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vstmdb(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstmdb(al, dt, rn, write_back, dreglist); + } + void vstmdb(Register rn, WriteBack write_back, DRegisterList dreglist) { + vstmdb(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vstmdb(Condition cond, + Register rn, + WriteBack 
write_back, + DRegisterList dreglist) { + vstmdb(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vstmdb(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmdb(al, dt, rn, write_back, sreglist); + } + void vstmdb(Register rn, WriteBack write_back, SRegisterList sreglist) { + vstmdb(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vstmdb(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmdb(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + void vstmia(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstmia(al, dt, rn, write_back, dreglist); + } + void vstmia(Register rn, WriteBack write_back, DRegisterList dreglist) { + vstmia(al, kDataTypeValueNone, rn, write_back, dreglist); + } + void vstmia(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + vstmia(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + void vstmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmia(al, dt, rn, write_back, sreglist); + } + void vstmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + vstmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + void vstmia(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + vstmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + + void vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + void vstr(DataType dt, DRegister rd, const MemOperand& operand) { + vstr(al, dt, rd, operand); + } + void vstr(DRegister rd, 
const MemOperand& operand) { + vstr(al, Untyped64, rd, operand); + } + void vstr(Condition cond, DRegister rd, const MemOperand& operand) { + vstr(cond, Untyped64, rd, operand); + } + + void vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + void vstr(DataType dt, SRegister rd, const MemOperand& operand) { + vstr(al, dt, rd, operand); + } + void vstr(SRegister rd, const MemOperand& operand) { + vstr(al, Untyped32, rd, operand); + } + void vstr(Condition cond, SRegister rd, const MemOperand& operand) { + vstr(cond, Untyped32, rd, operand); + } + + void vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vsub(al, dt, rd, rn, rm); + } + + void vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vsub(al, dt, rd, rn, rm); + } + + void vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + void vsub(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + vsub(al, dt, rd, rn, rm); + } + + void vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + void vsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + vsubhn(al, dt, rd, rn, rm); + } + + void vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + void vsubl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + vsubl(al, dt, rd, rn, rm); + } + + void vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + void vsubw(DataType dt, QRegister rd, QRegister rn, DRegister rm) { + vsubw(al, dt, rd, rn, rm); + } + + void vswp(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vswp(DataType dt, DRegister rd, DRegister rm) { vswp(al, dt, rd, rm); } + void vswp(DRegister rd, DRegister rm) { + vswp(al, kDataTypeValueNone, rd, rm); + } + void vswp(Condition cond, 
DRegister rd, DRegister rm) { + vswp(cond, kDataTypeValueNone, rd, rm); + } + + void vswp(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vswp(DataType dt, QRegister rd, QRegister rm) { vswp(al, dt, rd, rm); } + void vswp(QRegister rd, QRegister rm) { + vswp(al, kDataTypeValueNone, rd, rm); + } + void vswp(Condition cond, QRegister rd, QRegister rm) { + vswp(cond, kDataTypeValueNone, rd, rm); + } + + void vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + void vtbl(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + vtbl(al, dt, rd, nreglist, rm); + } + + void vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + void vtbx(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + vtbx(al, dt, rd, nreglist, rm); + } + + void vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vtrn(DataType dt, DRegister rd, DRegister rm) { vtrn(al, dt, rd, rm); } + + void vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vtrn(DataType dt, QRegister rd, QRegister rm) { vtrn(al, dt, rd, rm); } + + void vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + void vtst(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + vtst(al, dt, rd, rn, rm); + } + + void vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + void vtst(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + vtst(al, dt, rd, rn, rm); + } + + void vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vuzp(DataType dt, DRegister rd, DRegister rm) { vuzp(al, dt, rd, rm); } + + void vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vuzp(DataType dt, QRegister rd, QRegister rm) { vuzp(al, dt, rd, rm); } + + void vzip(Condition cond, DataType dt, DRegister rd, DRegister rm); + void vzip(DataType dt, 
DRegister rd, DRegister rm) { vzip(al, dt, rd, rm); } + + void vzip(Condition cond, DataType dt, QRegister rd, QRegister rm); + void vzip(DataType dt, QRegister rd, QRegister rm) { vzip(al, dt, rd, rm); } + + void yield(Condition cond, EncodingSize size); + void yield() { yield(al, Best); } + void yield(Condition cond) { yield(cond, Best); } + void yield(EncodingSize size) { yield(al, size); } + // End of generated code. + virtual void UnimplementedDelegate(InstructionType type) { + std::string error_message(std::string("Ill-formed '") + + std::string(ToCString(type)) + + std::string("' instruction.\n")); + VIXL_ABORT_WITH_MSG(error_message.c_str()); + } + virtual bool AllowUnpredictable() { return allow_unpredictable_; } + virtual bool AllowStronglyDiscouraged() { + return allow_strongly_discouraged_; + } +}; + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_AARCH32_ASSEMBLER_AARCH32_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.cc new file mode 100644 index 00000000..75923617 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.cc @@ -0,0 +1,855 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include "aarch32/constants-aarch32.h" +#include "utils-vixl.h" + +namespace vixl { +namespace aarch32 { + +// Start of generated code. +const char* ToCString(InstructionType type) { + switch (type) { + case kAdc: + return "adc"; + case kAdcs: + return "adcs"; + case kAdd: + return "add"; + case kAdds: + return "adds"; + case kAddw: + return "addw"; + case kAdr: + return "adr"; + case kAnd: + return "and"; + case kAnds: + return "ands"; + case kAsr: + return "asr"; + case kAsrs: + return "asrs"; + case kB: + return "b"; + case kBfc: + return "bfc"; + case kBfi: + return "bfi"; + case kBic: + return "bic"; + case kBics: + return "bics"; + case kBkpt: + return "bkpt"; + case kBl: + return "bl"; + case kBlx: + return "blx"; + case kBx: + return "bx"; + case kBxj: + return "bxj"; + case kCbnz: + return "cbnz"; + case kCbz: + return "cbz"; + case kClrex: + return "clrex"; + case kClz: + return "clz"; + case kCmn: + return "cmn"; + case kCmp: + return "cmp"; + case kCrc32b: + return "crc32b"; + case kCrc32cb: + return "crc32cb"; + case kCrc32ch: + return "crc32ch"; + case kCrc32cw: + return "crc32cw"; + case kCrc32h: + return "crc32h"; + case kCrc32w: + return "crc32w"; + case kDmb: + 
return "dmb"; + case kDsb: + return "dsb"; + case kEor: + return "eor"; + case kEors: + return "eors"; + case kFldmdbx: + return "fldmdbx"; + case kFldmiax: + return "fldmiax"; + case kFstmdbx: + return "fstmdbx"; + case kFstmiax: + return "fstmiax"; + case kHlt: + return "hlt"; + case kHvc: + return "hvc"; + case kIsb: + return "isb"; + case kIt: + return "it"; + case kLda: + return "lda"; + case kLdab: + return "ldab"; + case kLdaex: + return "ldaex"; + case kLdaexb: + return "ldaexb"; + case kLdaexd: + return "ldaexd"; + case kLdaexh: + return "ldaexh"; + case kLdah: + return "ldah"; + case kLdm: + return "ldm"; + case kLdmda: + return "ldmda"; + case kLdmdb: + return "ldmdb"; + case kLdmea: + return "ldmea"; + case kLdmed: + return "ldmed"; + case kLdmfa: + return "ldmfa"; + case kLdmfd: + return "ldmfd"; + case kLdmib: + return "ldmib"; + case kLdr: + return "ldr"; + case kLdrb: + return "ldrb"; + case kLdrd: + return "ldrd"; + case kLdrex: + return "ldrex"; + case kLdrexb: + return "ldrexb"; + case kLdrexd: + return "ldrexd"; + case kLdrexh: + return "ldrexh"; + case kLdrh: + return "ldrh"; + case kLdrsb: + return "ldrsb"; + case kLdrsh: + return "ldrsh"; + case kLsl: + return "lsl"; + case kLsls: + return "lsls"; + case kLsr: + return "lsr"; + case kLsrs: + return "lsrs"; + case kMla: + return "mla"; + case kMlas: + return "mlas"; + case kMls: + return "mls"; + case kMov: + return "mov"; + case kMovs: + return "movs"; + case kMovt: + return "movt"; + case kMovw: + return "movw"; + case kMrs: + return "mrs"; + case kMsr: + return "msr"; + case kMul: + return "mul"; + case kMuls: + return "muls"; + case kMvn: + return "mvn"; + case kMvns: + return "mvns"; + case kNop: + return "nop"; + case kOrn: + return "orn"; + case kOrns: + return "orns"; + case kOrr: + return "orr"; + case kOrrs: + return "orrs"; + case kPkhbt: + return "pkhbt"; + case kPkhtb: + return "pkhtb"; + case kPld: + return "pld"; + case kPldw: + return "pldw"; + case kPli: + return "pli"; + case 
kPop: + return "pop"; + case kPush: + return "push"; + case kQadd: + return "qadd"; + case kQadd16: + return "qadd16"; + case kQadd8: + return "qadd8"; + case kQasx: + return "qasx"; + case kQdadd: + return "qdadd"; + case kQdsub: + return "qdsub"; + case kQsax: + return "qsax"; + case kQsub: + return "qsub"; + case kQsub16: + return "qsub16"; + case kQsub8: + return "qsub8"; + case kRbit: + return "rbit"; + case kRev: + return "rev"; + case kRev16: + return "rev16"; + case kRevsh: + return "revsh"; + case kRor: + return "ror"; + case kRors: + return "rors"; + case kRrx: + return "rrx"; + case kRrxs: + return "rrxs"; + case kRsb: + return "rsb"; + case kRsbs: + return "rsbs"; + case kRsc: + return "rsc"; + case kRscs: + return "rscs"; + case kSadd16: + return "sadd16"; + case kSadd8: + return "sadd8"; + case kSasx: + return "sasx"; + case kSbc: + return "sbc"; + case kSbcs: + return "sbcs"; + case kSbfx: + return "sbfx"; + case kSdiv: + return "sdiv"; + case kSel: + return "sel"; + case kShadd16: + return "shadd16"; + case kShadd8: + return "shadd8"; + case kShasx: + return "shasx"; + case kShsax: + return "shsax"; + case kShsub16: + return "shsub16"; + case kShsub8: + return "shsub8"; + case kSmlabb: + return "smlabb"; + case kSmlabt: + return "smlabt"; + case kSmlad: + return "smlad"; + case kSmladx: + return "smladx"; + case kSmlal: + return "smlal"; + case kSmlalbb: + return "smlalbb"; + case kSmlalbt: + return "smlalbt"; + case kSmlald: + return "smlald"; + case kSmlaldx: + return "smlaldx"; + case kSmlals: + return "smlals"; + case kSmlaltb: + return "smlaltb"; + case kSmlaltt: + return "smlaltt"; + case kSmlatb: + return "smlatb"; + case kSmlatt: + return "smlatt"; + case kSmlawb: + return "smlawb"; + case kSmlawt: + return "smlawt"; + case kSmlsd: + return "smlsd"; + case kSmlsdx: + return "smlsdx"; + case kSmlsld: + return "smlsld"; + case kSmlsldx: + return "smlsldx"; + case kSmmla: + return "smmla"; + case kSmmlar: + return "smmlar"; + case kSmmls: + 
return "smmls"; + case kSmmlsr: + return "smmlsr"; + case kSmmul: + return "smmul"; + case kSmmulr: + return "smmulr"; + case kSmuad: + return "smuad"; + case kSmuadx: + return "smuadx"; + case kSmulbb: + return "smulbb"; + case kSmulbt: + return "smulbt"; + case kSmull: + return "smull"; + case kSmulls: + return "smulls"; + case kSmultb: + return "smultb"; + case kSmultt: + return "smultt"; + case kSmulwb: + return "smulwb"; + case kSmulwt: + return "smulwt"; + case kSmusd: + return "smusd"; + case kSmusdx: + return "smusdx"; + case kSsat: + return "ssat"; + case kSsat16: + return "ssat16"; + case kSsax: + return "ssax"; + case kSsub16: + return "ssub16"; + case kSsub8: + return "ssub8"; + case kStl: + return "stl"; + case kStlb: + return "stlb"; + case kStlex: + return "stlex"; + case kStlexb: + return "stlexb"; + case kStlexd: + return "stlexd"; + case kStlexh: + return "stlexh"; + case kStlh: + return "stlh"; + case kStm: + return "stm"; + case kStmda: + return "stmda"; + case kStmdb: + return "stmdb"; + case kStmea: + return "stmea"; + case kStmed: + return "stmed"; + case kStmfa: + return "stmfa"; + case kStmfd: + return "stmfd"; + case kStmib: + return "stmib"; + case kStr: + return "str"; + case kStrb: + return "strb"; + case kStrd: + return "strd"; + case kStrex: + return "strex"; + case kStrexb: + return "strexb"; + case kStrexd: + return "strexd"; + case kStrexh: + return "strexh"; + case kStrh: + return "strh"; + case kSub: + return "sub"; + case kSubs: + return "subs"; + case kSubw: + return "subw"; + case kSvc: + return "svc"; + case kSxtab: + return "sxtab"; + case kSxtab16: + return "sxtab16"; + case kSxtah: + return "sxtah"; + case kSxtb: + return "sxtb"; + case kSxtb16: + return "sxtb16"; + case kSxth: + return "sxth"; + case kTbb: + return "tbb"; + case kTbh: + return "tbh"; + case kTeq: + return "teq"; + case kTst: + return "tst"; + case kUadd16: + return "uadd16"; + case kUadd8: + return "uadd8"; + case kUasx: + return "uasx"; + case kUbfx: + 
return "ubfx"; + case kUdf: + return "udf"; + case kUdiv: + return "udiv"; + case kUhadd16: + return "uhadd16"; + case kUhadd8: + return "uhadd8"; + case kUhasx: + return "uhasx"; + case kUhsax: + return "uhsax"; + case kUhsub16: + return "uhsub16"; + case kUhsub8: + return "uhsub8"; + case kUmaal: + return "umaal"; + case kUmlal: + return "umlal"; + case kUmlals: + return "umlals"; + case kUmull: + return "umull"; + case kUmulls: + return "umulls"; + case kUqadd16: + return "uqadd16"; + case kUqadd8: + return "uqadd8"; + case kUqasx: + return "uqasx"; + case kUqsax: + return "uqsax"; + case kUqsub16: + return "uqsub16"; + case kUqsub8: + return "uqsub8"; + case kUsad8: + return "usad8"; + case kUsada8: + return "usada8"; + case kUsat: + return "usat"; + case kUsat16: + return "usat16"; + case kUsax: + return "usax"; + case kUsub16: + return "usub16"; + case kUsub8: + return "usub8"; + case kUxtab: + return "uxtab"; + case kUxtab16: + return "uxtab16"; + case kUxtah: + return "uxtah"; + case kUxtb: + return "uxtb"; + case kUxtb16: + return "uxtb16"; + case kUxth: + return "uxth"; + case kVaba: + return "vaba"; + case kVabal: + return "vabal"; + case kVabd: + return "vabd"; + case kVabdl: + return "vabdl"; + case kVabs: + return "vabs"; + case kVacge: + return "vacge"; + case kVacgt: + return "vacgt"; + case kVacle: + return "vacle"; + case kVaclt: + return "vaclt"; + case kVadd: + return "vadd"; + case kVaddhn: + return "vaddhn"; + case kVaddl: + return "vaddl"; + case kVaddw: + return "vaddw"; + case kVand: + return "vand"; + case kVbic: + return "vbic"; + case kVbif: + return "vbif"; + case kVbit: + return "vbit"; + case kVbsl: + return "vbsl"; + case kVceq: + return "vceq"; + case kVcge: + return "vcge"; + case kVcgt: + return "vcgt"; + case kVcle: + return "vcle"; + case kVcls: + return "vcls"; + case kVclt: + return "vclt"; + case kVclz: + return "vclz"; + case kVcmp: + return "vcmp"; + case kVcmpe: + return "vcmpe"; + case kVcnt: + return "vcnt"; + case 
kVcvt: + return "vcvt"; + case kVcvta: + return "vcvta"; + case kVcvtb: + return "vcvtb"; + case kVcvtm: + return "vcvtm"; + case kVcvtn: + return "vcvtn"; + case kVcvtp: + return "vcvtp"; + case kVcvtr: + return "vcvtr"; + case kVcvtt: + return "vcvtt"; + case kVdiv: + return "vdiv"; + case kVdup: + return "vdup"; + case kVeor: + return "veor"; + case kVext: + return "vext"; + case kVfma: + return "vfma"; + case kVfms: + return "vfms"; + case kVfnma: + return "vfnma"; + case kVfnms: + return "vfnms"; + case kVhadd: + return "vhadd"; + case kVhsub: + return "vhsub"; + case kVld1: + return "vld1"; + case kVld2: + return "vld2"; + case kVld3: + return "vld3"; + case kVld4: + return "vld4"; + case kVldm: + return "vldm"; + case kVldmdb: + return "vldmdb"; + case kVldmia: + return "vldmia"; + case kVldr: + return "vldr"; + case kVmax: + return "vmax"; + case kVmaxnm: + return "vmaxnm"; + case kVmin: + return "vmin"; + case kVminnm: + return "vminnm"; + case kVmla: + return "vmla"; + case kVmlal: + return "vmlal"; + case kVmls: + return "vmls"; + case kVmlsl: + return "vmlsl"; + case kVmov: + return "vmov"; + case kVmovl: + return "vmovl"; + case kVmovn: + return "vmovn"; + case kVmrs: + return "vmrs"; + case kVmsr: + return "vmsr"; + case kVmul: + return "vmul"; + case kVmull: + return "vmull"; + case kVmvn: + return "vmvn"; + case kVneg: + return "vneg"; + case kVnmla: + return "vnmla"; + case kVnmls: + return "vnmls"; + case kVnmul: + return "vnmul"; + case kVorn: + return "vorn"; + case kVorr: + return "vorr"; + case kVpadal: + return "vpadal"; + case kVpadd: + return "vpadd"; + case kVpaddl: + return "vpaddl"; + case kVpmax: + return "vpmax"; + case kVpmin: + return "vpmin"; + case kVpop: + return "vpop"; + case kVpush: + return "vpush"; + case kVqabs: + return "vqabs"; + case kVqadd: + return "vqadd"; + case kVqdmlal: + return "vqdmlal"; + case kVqdmlsl: + return "vqdmlsl"; + case kVqdmulh: + return "vqdmulh"; + case kVqdmull: + return "vqdmull"; + case kVqmovn: + 
return "vqmovn"; + case kVqmovun: + return "vqmovun"; + case kVqneg: + return "vqneg"; + case kVqrdmulh: + return "vqrdmulh"; + case kVqrshl: + return "vqrshl"; + case kVqrshrn: + return "vqrshrn"; + case kVqrshrun: + return "vqrshrun"; + case kVqshl: + return "vqshl"; + case kVqshlu: + return "vqshlu"; + case kVqshrn: + return "vqshrn"; + case kVqshrun: + return "vqshrun"; + case kVqsub: + return "vqsub"; + case kVraddhn: + return "vraddhn"; + case kVrecpe: + return "vrecpe"; + case kVrecps: + return "vrecps"; + case kVrev16: + return "vrev16"; + case kVrev32: + return "vrev32"; + case kVrev64: + return "vrev64"; + case kVrhadd: + return "vrhadd"; + case kVrinta: + return "vrinta"; + case kVrintm: + return "vrintm"; + case kVrintn: + return "vrintn"; + case kVrintp: + return "vrintp"; + case kVrintr: + return "vrintr"; + case kVrintx: + return "vrintx"; + case kVrintz: + return "vrintz"; + case kVrshl: + return "vrshl"; + case kVrshr: + return "vrshr"; + case kVrshrn: + return "vrshrn"; + case kVrsqrte: + return "vrsqrte"; + case kVrsqrts: + return "vrsqrts"; + case kVrsra: + return "vrsra"; + case kVrsubhn: + return "vrsubhn"; + case kVseleq: + return "vseleq"; + case kVselge: + return "vselge"; + case kVselgt: + return "vselgt"; + case kVselvs: + return "vselvs"; + case kVshl: + return "vshl"; + case kVshll: + return "vshll"; + case kVshr: + return "vshr"; + case kVshrn: + return "vshrn"; + case kVsli: + return "vsli"; + case kVsqrt: + return "vsqrt"; + case kVsra: + return "vsra"; + case kVsri: + return "vsri"; + case kVst1: + return "vst1"; + case kVst2: + return "vst2"; + case kVst3: + return "vst3"; + case kVst4: + return "vst4"; + case kVstm: + return "vstm"; + case kVstmdb: + return "vstmdb"; + case kVstmia: + return "vstmia"; + case kVstr: + return "vstr"; + case kVsub: + return "vsub"; + case kVsubhn: + return "vsubhn"; + case kVsubl: + return "vsubl"; + case kVsubw: + return "vsubw"; + case kVswp: + return "vswp"; + case kVtbl: + return "vtbl"; + case 
kVtbx: + return "vtbx"; + case kVtrn: + return "vtrn"; + case kVtst: + return "vtst"; + case kVuzp: + return "vuzp"; + case kVzip: + return "vzip"; + case kYield: + return "yield"; + case kUndefInstructionType: + VIXL_UNREACHABLE(); + return ""; + } + VIXL_UNREACHABLE(); + return ""; +} // NOLINT(readability/fn_size) +// End of generated code. + +} // namespace aarch32 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.h new file mode 100644 index 00000000..6d79834d --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/constants-aarch32.h @@ -0,0 +1,541 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_CONSTANTS_AARCH32_H_ +#define VIXL_CONSTANTS_AARCH32_H_ + +extern "C" { +#include +} + +#include "globals-vixl.h" + + +namespace vixl { +namespace aarch32 { + +enum InstructionSet { A32, T32 }; +#ifdef VIXL_INCLUDE_TARGET_T32_ONLY +const InstructionSet kDefaultISA = T32; +#else +const InstructionSet kDefaultISA = A32; +#endif + +const unsigned kRegSizeInBits = 32; +const unsigned kRegSizeInBytes = kRegSizeInBits / 8; +const unsigned kSRegSizeInBits = 32; +const unsigned kSRegSizeInBytes = kSRegSizeInBits / 8; +const unsigned kDRegSizeInBits = 64; +const unsigned kDRegSizeInBytes = kDRegSizeInBits / 8; +const unsigned kQRegSizeInBits = 128; +const unsigned kQRegSizeInBytes = kQRegSizeInBits / 8; + +const unsigned kNumberOfRegisters = 16; +const unsigned kNumberOfSRegisters = 32; +const unsigned kMaxNumberOfDRegisters = 32; +const unsigned kNumberOfQRegisters = 16; +const unsigned kNumberOfT32LowRegisters = 8; + +const unsigned kIpCode = 12; +const unsigned kSpCode = 13; +const unsigned kLrCode = 14; +const unsigned kPcCode = 15; + +const unsigned kT32PcDelta = 4; +const unsigned kA32PcDelta = 8; + +const unsigned kRRXEncodedValue = 3; + +const unsigned kCoprocMask = 0xe; +const unsigned kInvalidCoprocMask = 0xa; + +const unsigned kLowestT32_32Opcode = 0xe8000000; + +const uint32_t kUnknownValue = 0xdeadbeef; + +const uint32_t kMaxInstructionSizeInBytes = 4; +const uint32_t kA32InstructionSizeInBytes = 4; +const 
uint32_t k32BitT32InstructionSizeInBytes = 4; +const uint32_t k16BitT32InstructionSizeInBytes = 2; + +// Maximum size emitted by a single T32 unconditional macro-instruction. +const uint32_t kMaxT32MacroInstructionSizeInBytes = 32; + +const uint32_t kCallerSavedRegistersMask = 0x500f; + +const uint16_t k16BitT32NopOpcode = 0xbf00; +const uint16_t kCbzCbnzMask = 0xf500; +const uint16_t kCbzCbnzValue = 0xb100; + +const int32_t kCbzCbnzRange = 126; +const int32_t kBConditionalNarrowRange = 254; +const int32_t kBNarrowRange = 2046; +const int32_t kNearLabelRange = kBNarrowRange; + +enum SystemFunctionsOpcodes { kPrintfCode }; + +enum BranchHint { kNear, kFar, kBranchWithoutHint }; + +// Start of generated code. +// AArch32 version implemented by the library (v8.0). +// The encoding for vX.Y is: (X << 8) | Y. +#define AARCH32_VERSION 0x0800 + +enum InstructionAttribute { + kNoAttribute = 0, + kArithmetic = 0x1, + kBitwise = 0x2, + kShift = 0x4, + kAddress = 0x8, + kBranch = 0x10, + kSystem = 0x20, + kFpNeon = 0x40, + kLoadStore = 0x80, + kLoadStoreMultiple = 0x100 +}; + +enum InstructionType { + kUndefInstructionType, + kAdc, + kAdcs, + kAdd, + kAdds, + kAddw, + kAdr, + kAnd, + kAnds, + kAsr, + kAsrs, + kB, + kBfc, + kBfi, + kBic, + kBics, + kBkpt, + kBl, + kBlx, + kBx, + kBxj, + kCbnz, + kCbz, + kClrex, + kClz, + kCmn, + kCmp, + kCrc32b, + kCrc32cb, + kCrc32ch, + kCrc32cw, + kCrc32h, + kCrc32w, + kDmb, + kDsb, + kEor, + kEors, + kFldmdbx, + kFldmiax, + kFstmdbx, + kFstmiax, + kHlt, + kHvc, + kIsb, + kIt, + kLda, + kLdab, + kLdaex, + kLdaexb, + kLdaexd, + kLdaexh, + kLdah, + kLdm, + kLdmda, + kLdmdb, + kLdmea, + kLdmed, + kLdmfa, + kLdmfd, + kLdmib, + kLdr, + kLdrb, + kLdrd, + kLdrex, + kLdrexb, + kLdrexd, + kLdrexh, + kLdrh, + kLdrsb, + kLdrsh, + kLsl, + kLsls, + kLsr, + kLsrs, + kMla, + kMlas, + kMls, + kMov, + kMovs, + kMovt, + kMovw, + kMrs, + kMsr, + kMul, + kMuls, + kMvn, + kMvns, + kNop, + kOrn, + kOrns, + kOrr, + kOrrs, + kPkhbt, + kPkhtb, + kPld, + kPldw, + 
kPli, + kPop, + kPush, + kQadd, + kQadd16, + kQadd8, + kQasx, + kQdadd, + kQdsub, + kQsax, + kQsub, + kQsub16, + kQsub8, + kRbit, + kRev, + kRev16, + kRevsh, + kRor, + kRors, + kRrx, + kRrxs, + kRsb, + kRsbs, + kRsc, + kRscs, + kSadd16, + kSadd8, + kSasx, + kSbc, + kSbcs, + kSbfx, + kSdiv, + kSel, + kShadd16, + kShadd8, + kShasx, + kShsax, + kShsub16, + kShsub8, + kSmlabb, + kSmlabt, + kSmlad, + kSmladx, + kSmlal, + kSmlalbb, + kSmlalbt, + kSmlald, + kSmlaldx, + kSmlals, + kSmlaltb, + kSmlaltt, + kSmlatb, + kSmlatt, + kSmlawb, + kSmlawt, + kSmlsd, + kSmlsdx, + kSmlsld, + kSmlsldx, + kSmmla, + kSmmlar, + kSmmls, + kSmmlsr, + kSmmul, + kSmmulr, + kSmuad, + kSmuadx, + kSmulbb, + kSmulbt, + kSmull, + kSmulls, + kSmultb, + kSmultt, + kSmulwb, + kSmulwt, + kSmusd, + kSmusdx, + kSsat, + kSsat16, + kSsax, + kSsub16, + kSsub8, + kStl, + kStlb, + kStlex, + kStlexb, + kStlexd, + kStlexh, + kStlh, + kStm, + kStmda, + kStmdb, + kStmea, + kStmed, + kStmfa, + kStmfd, + kStmib, + kStr, + kStrb, + kStrd, + kStrex, + kStrexb, + kStrexd, + kStrexh, + kStrh, + kSub, + kSubs, + kSubw, + kSvc, + kSxtab, + kSxtab16, + kSxtah, + kSxtb, + kSxtb16, + kSxth, + kTbb, + kTbh, + kTeq, + kTst, + kUadd16, + kUadd8, + kUasx, + kUbfx, + kUdf, + kUdiv, + kUhadd16, + kUhadd8, + kUhasx, + kUhsax, + kUhsub16, + kUhsub8, + kUmaal, + kUmlal, + kUmlals, + kUmull, + kUmulls, + kUqadd16, + kUqadd8, + kUqasx, + kUqsax, + kUqsub16, + kUqsub8, + kUsad8, + kUsada8, + kUsat, + kUsat16, + kUsax, + kUsub16, + kUsub8, + kUxtab, + kUxtab16, + kUxtah, + kUxtb, + kUxtb16, + kUxth, + kVaba, + kVabal, + kVabd, + kVabdl, + kVabs, + kVacge, + kVacgt, + kVacle, + kVaclt, + kVadd, + kVaddhn, + kVaddl, + kVaddw, + kVand, + kVbic, + kVbif, + kVbit, + kVbsl, + kVceq, + kVcge, + kVcgt, + kVcle, + kVcls, + kVclt, + kVclz, + kVcmp, + kVcmpe, + kVcnt, + kVcvt, + kVcvta, + kVcvtb, + kVcvtm, + kVcvtn, + kVcvtp, + kVcvtr, + kVcvtt, + kVdiv, + kVdup, + kVeor, + kVext, + kVfma, + kVfms, + kVfnma, + kVfnms, + kVhadd, + kVhsub, + kVld1, 
+ kVld2, + kVld3, + kVld4, + kVldm, + kVldmdb, + kVldmia, + kVldr, + kVmax, + kVmaxnm, + kVmin, + kVminnm, + kVmla, + kVmlal, + kVmls, + kVmlsl, + kVmov, + kVmovl, + kVmovn, + kVmrs, + kVmsr, + kVmul, + kVmull, + kVmvn, + kVneg, + kVnmla, + kVnmls, + kVnmul, + kVorn, + kVorr, + kVpadal, + kVpadd, + kVpaddl, + kVpmax, + kVpmin, + kVpop, + kVpush, + kVqabs, + kVqadd, + kVqdmlal, + kVqdmlsl, + kVqdmulh, + kVqdmull, + kVqmovn, + kVqmovun, + kVqneg, + kVqrdmulh, + kVqrshl, + kVqrshrn, + kVqrshrun, + kVqshl, + kVqshlu, + kVqshrn, + kVqshrun, + kVqsub, + kVraddhn, + kVrecpe, + kVrecps, + kVrev16, + kVrev32, + kVrev64, + kVrhadd, + kVrinta, + kVrintm, + kVrintn, + kVrintp, + kVrintr, + kVrintx, + kVrintz, + kVrshl, + kVrshr, + kVrshrn, + kVrsqrte, + kVrsqrts, + kVrsra, + kVrsubhn, + kVseleq, + kVselge, + kVselgt, + kVselvs, + kVshl, + kVshll, + kVshr, + kVshrn, + kVsli, + kVsqrt, + kVsra, + kVsri, + kVst1, + kVst2, + kVst3, + kVst4, + kVstm, + kVstmdb, + kVstmia, + kVstr, + kVsub, + kVsubhn, + kVsubl, + kVsubw, + kVswp, + kVtbl, + kVtbx, + kVtrn, + kVtst, + kVuzp, + kVzip, + kYield +}; + +const char* ToCString(InstructionType type); +// End of generated code. + +inline InstructionAttribute operator|(InstructionAttribute left, + InstructionAttribute right) { + return static_cast(static_cast(left) | + static_cast(right)); +} + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_CONSTANTS_AARCH32_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.cc new file mode 100644 index 00000000..9ed3a831 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.cc @@ -0,0 +1,67276 @@ +// Copyright 2015, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +extern "C" { +#include +} + +#include +#include +#include +#include +#include +#include + +#include "utils-vixl.h" +#include "aarch32/constants-aarch32.h" +#include "aarch32/disasm-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/operands-aarch32.h" + +namespace vixl { +namespace aarch32 { + +using internal::Int64; +using internal::Uint32; + +class T32CodeAddressIncrementer { + uint32_t* code_address_; + uint32_t increment_; + + public: + T32CodeAddressIncrementer(uint32_t instr, uint32_t* code_address) + : code_address_(code_address), + increment_(Disassembler::Is16BitEncoding(instr) ? 2 : 4) {} + ~T32CodeAddressIncrementer() { *code_address_ += increment_; } +}; + +class A32CodeAddressIncrementer { + uint32_t* code_address_; + + public: + explicit A32CodeAddressIncrementer(uint32_t* code_address) + : code_address_(code_address) {} + ~A32CodeAddressIncrementer() { *code_address_ += 4; } +}; + +class DecodeNeon { + int lane_; + SpacingType spacing_; + bool valid_; + + public: + DecodeNeon(int lane, SpacingType spacing) + : lane_(lane), spacing_(spacing), valid_(true) {} + DecodeNeon() : lane_(0), spacing_(kSingle), valid_(false) {} + int GetLane() const { return lane_; } + SpacingType GetSpacing() const { return spacing_; } + bool IsValid() const { return valid_; } +}; + +class DecodeNeonAndAlign : public DecodeNeon { + public: + Alignment align_; + DecodeNeonAndAlign(int lanes, SpacingType spacing, Alignment align) + : DecodeNeon(lanes, spacing), align_(align) {} + DecodeNeonAndAlign() : align_(kBadAlignment) {} + Alignment GetAlign() const { return align_; } +}; + +// Start of generated code. 
+DataTypeValue Dt_L_imm6_1_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0xf) == 0x1) { + switch (type_value) { + case 0x0: + return S8; + case 0x1: + return U8; + } + } else if ((value & 0xe) == 0x2) { + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + } else if ((value & 0xc) == 0x4) { + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + } else if ((value & 0x8) == 0x8) { + switch (type_value) { + case 0x0: + return S64; + case 0x1: + return U64; + } + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_L_imm6_2_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0xf) == 0x1) { + if (type_value == 0x1) return S8; + } else if ((value & 0xe) == 0x2) { + if (type_value == 0x1) return S16; + } else if ((value & 0xc) == 0x4) { + if (type_value == 0x1) return S32; + } else if ((value & 0x8) == 0x8) { + if (type_value == 0x1) return S64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_L_imm6_3_Decode(uint32_t value) { + if ((value & 0xf) == 0x1) { + return I8; + } else if ((value & 0xe) == 0x2) { + return I16; + } else if ((value & 0xc) == 0x4) { + return I32; + } else if ((value & 0x8) == 0x8) { + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_L_imm6_4_Decode(uint32_t value) { + if ((value & 0xf) == 0x1) { + return Untyped8; + } else if ((value & 0xe) == 0x2) { + return Untyped16; + } else if ((value & 0xc) == 0x4) { + return Untyped32; + } else if ((value & 0x8) == 0x8) { + return Untyped64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_1_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0x7) == 0x1) { + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + } else if ((value & 0x6) == 0x2) { + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + } else if ((value & 0x4) == 0x4) { + switch (type_value) { + case 0x0: + return S64; + case 0x1: + return U64; + } + } + 
return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_2_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0x7) == 0x1) { + if (type_value == 0x1) return S16; + } else if ((value & 0x6) == 0x2) { + if (type_value == 0x1) return S32; + } else if ((value & 0x4) == 0x4) { + if (type_value == 0x1) return S64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_3_Decode(uint32_t value) { + if ((value & 0x7) == 0x1) { + return I16; + } else if ((value & 0x6) == 0x2) { + return I32; + } else if ((value & 0x4) == 0x4) { + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm6_4_Decode(uint32_t value, uint32_t type_value) { + if ((value & 0x7) == 0x1) { + switch (type_value) { + case 0x0: + return S8; + case 0x1: + return U8; + } + } else if ((value & 0x6) == 0x2) { + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + } else if ((value & 0x4) == 0x4) { + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_U_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + case 0x8: + return P8; + case 0xa: + return P64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + case 0x4: + return P8; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_size_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_size_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S16; + case 0x1: + return S32; + case 0x2: + return 
S64; + case 0x4: + return U16; + case 0x5: + return U32; + case 0x6: + return U64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_imm3H_1_Decode(uint32_t value) { + switch (value) { + case 0x1: + return S8; + case 0x2: + return S16; + case 0x4: + return S32; + case 0x9: + return U8; + case 0xa: + return U16; + case 0xc: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_opc1_opc2_1_Decode(uint32_t value, unsigned* lane) { + if ((value & 0x18) == 0x8) { + *lane = value & 7; + return S8; + } + if ((value & 0x19) == 0x1) { + *lane = (value >> 1) & 3; + return S16; + } + if ((value & 0x18) == 0x18) { + *lane = value & 7; + return U8; + } + if ((value & 0x19) == 0x11) { + *lane = (value >> 1) & 3; + return U16; + } + if ((value & 0x1b) == 0x0) { + *lane = (value >> 2) & 1; + return Untyped32; + } + *lane = -1; + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_opc1_opc2_1_Decode(uint32_t value, unsigned* lane) { + if ((value & 0x8) == 0x8) { + *lane = value & 7; + return Untyped8; + } + if ((value & 0x9) == 0x1) { + *lane = (value >> 1) & 3; + return Untyped16; + } + if ((value & 0xb) == 0x0) { + *lane = (value >> 2) & 1; + return Untyped32; + } + *lane = -1; + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_imm4_1_Decode(uint32_t value, unsigned* lane) { + if ((value & 0x1) == 0x1) { + *lane = (value >> 1) & 7; + return Untyped8; + } + if ((value & 0x3) == 0x2) { + *lane = (value >> 2) & 3; + return Untyped16; + } + if ((value & 0x7) == 0x4) { + *lane = (value >> 3) & 1; + return Untyped32; + } + *lane = -1; + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_B_E_1_Decode(uint32_t value) { + switch (value) { + case 0x2: + return Untyped8; + case 0x1: + return Untyped16; + case 0x0: + return Untyped32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_1_Decode1(uint32_t value) { + switch (value) { + case 0x0: + return F32; + case 0x1: + return F32; + case 0x2: + return S32; + case 0x3: + return U32; + } + 
return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_1_Decode2(uint32_t value) { + switch (value) { + case 0x0: + return S32; + case 0x1: + return U32; + case 0x2: + return F32; + case 0x3: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return U32; + case 0x1: + return S32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_sx_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S16; + case 0x1: + return S32; + case 0x2: + return U16; + case 0x3: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_U_1_Decode1(uint32_t value) { + switch (value) { + case 0x0: + return F32; + case 0x1: + return F32; + case 0x2: + return S32; + case 0x3: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_op_U_1_Decode2(uint32_t value) { + switch (value) { + case 0x0: + return S32; + case 0x1: + return U32; + case 0x2: + return F32; + case 0x3: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_sz_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_F_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_F_size_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_F_size_3_Decode(uint32_t value) { + switch (value) { + case 0x1: + return I16; + case 0x2: + return I32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue 
Dt_F_size_4_Decode(uint32_t value) { + switch (value) { + case 0x2: + return U32; + case 0x6: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_size_2_Decode(uint32_t value) { + switch (value) { + case 0x1: + return S16; + case 0x2: + return S32; + case 0x5: + return U16; + case 0x6: + return U32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_U_size_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + case 0x3: + return S64; + case 0x4: + return U8; + case 0x5: + return U16; + case 0x6: + return U32; + case 0x7: + return U64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_1_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_2_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + case 0x3: + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_3_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I16; + case 0x1: + return I32; + case 0x2: + return I64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_4_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_5_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S8; + case 0x1: + return S16; + case 0x2: + return S32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_6_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + 
case 0x2: + return Untyped32; + case 0x3: + return Untyped64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_7_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + case 0x2: + return Untyped32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_8_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + case 0x2: + return Untyped32; + case 0x3: + return Untyped32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_9_Decode(uint32_t value, uint32_t type_value) { + switch (value) { + case 0x1: + switch (type_value) { + case 0x0: + return I16; + } + break; + case 0x2: + switch (type_value) { + case 0x0: + return I32; + case 0x1: + return F32; + } + break; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_10_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_11_Decode(uint32_t value, uint32_t type_value) { + switch (value) { + case 0x1: + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + break; + case 0x2: + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + break; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_12_Decode(uint32_t value, uint32_t type_value) { + switch (value) { + case 0x0: + switch (type_value) { + case 0x0: + return S8; + case 0x1: + return U8; + } + break; + case 0x1: + switch (type_value) { + case 0x0: + return S16; + case 0x1: + return U16; + } + break; + case 0x2: + switch (type_value) { + case 0x0: + return S32; + case 0x1: + return U32; + } + break; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_13_Decode(uint32_t value) { + switch (value) { + case 0x1: + return S16; + case 0x2: + return S32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue 
Dt_size_14_Decode(uint32_t value) { + switch (value) { + case 0x0: + return S16; + case 0x1: + return S32; + case 0x2: + return S64; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_15_Decode(uint32_t value) { + switch (value) { + case 0x0: + return Untyped8; + case 0x1: + return Untyped16; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_16_Decode(uint32_t value) { + switch (value) { + case 0x2: + return F32; + } + return kDataTypeValueInvalid; +} + +DataTypeValue Dt_size_17_Decode(uint32_t value) { + switch (value) { + case 0x0: + return I8; + case 0x1: + return I16; + case 0x2: + return I32; + } + return kDataTypeValueInvalid; +} + +DecodeNeon Index_1_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + int lane = (value >> 1) & 0x7; + if ((value & 1) != 0) break; + SpacingType spacing = kSingle; + return DecodeNeon(lane, spacing); + } + case Untyped16: { + int lane = (value >> 2) & 0x3; + if ((value & 1) != 0) break; + SpacingType spacing = ((value & 3) == 2) ? kDouble : kSingle; + return DecodeNeon(lane, spacing); + } + case Untyped32: { + int lane = (value >> 3) & 0x1; + if ((value & 3) != 0) break; + SpacingType spacing = ((value & 7) == 4) ? 
kDouble : kSingle; + return DecodeNeon(lane, spacing); + } + default: + break; + } + return DecodeNeon(); +} + +DecodeNeonAndAlign Align_index_align_1_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + AlignmentType align; + if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 1) & 0x7; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped16: { + AlignmentType align; + if ((value & 3) == 1) { + align = k16BitAlign; + } else if ((value & 3) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 2) & 0x3; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped32: { + AlignmentType align; + if ((value & 7) == 3) { + align = k32BitAlign; + } else if ((value & 7) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 3) & 0x1; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + default: + break; + } + return DecodeNeonAndAlign(); +} + +DecodeNeonAndAlign Align_index_align_2_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + AlignmentType align; + if ((value & 1) == 1) { + align = k16BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 1) & 0x7; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped16: { + AlignmentType align; + if ((value & 1) == 1) { + align = k32BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 2) & 0x3; + SpacingType spacing = ((value & 2) == 2) ? 
kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped32: { + AlignmentType align; + if ((value & 3) == 1) { + align = k64BitAlign; + } else if ((value & 3) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 3) & 0x1; + SpacingType spacing = ((value & 4) == 4) ? kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + default: + break; + } + return DecodeNeonAndAlign(); +} + +DecodeNeonAndAlign Align_index_align_3_Decode(uint32_t value, DataType dt) { + switch (dt.GetValue()) { + case Untyped8: { + AlignmentType align; + if ((value & 1) == 1) { + align = k32BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 1) & 0x7; + SpacingType spacing = kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped16: { + AlignmentType align; + if ((value & 1) == 1) { + align = k64BitAlign; + } else if ((value & 1) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 2) & 0x3; + SpacingType spacing = ((value & 2) == 2) ? kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + case Untyped32: { + AlignmentType align; + if ((value & 3) == 1) { + align = k64BitAlign; + } else if ((value & 3) == 2) { + align = k128BitAlign; + } else if ((value & 3) == 0) { + align = kNoAlignment; + } else { + break; + } + int lane = (value >> 3) & 0x1; + SpacingType spacing = ((value & 4) == 4) ? 
kDouble : kSingle; + return DecodeNeonAndAlign(lane, spacing, align); + } + default: + break; + } + return DecodeNeonAndAlign(); +} + +Alignment Align_a_1_Decode(uint32_t value, DataType dt) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + if (dt.Is(Untyped16)) return k16BitAlign; + if (dt.Is(Untyped32)) return k32BitAlign; + break; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_a_2_Decode(uint32_t value, DataType dt) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + if (dt.Is(Untyped8)) return k16BitAlign; + if (dt.Is(Untyped16)) return k32BitAlign; + if (dt.Is(Untyped32)) return k64BitAlign; + break; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_a_3_Decode(uint32_t value, DataType dt, uint32_t size) { + switch (value) { + case 0: + if (size != 3) return kNoAlignment; + break; + case 1: + if (dt.Is(Untyped8)) return k32BitAlign; + if (dt.Is(Untyped16)) return k64BitAlign; + if (size == 2) return k64BitAlign; + if (size == 3) return k128BitAlign; + break; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_1_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_2_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_3_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_4_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + 
default: + break; + } + return kBadAlignment; +} + +Alignment Align_align_5_Decode(uint32_t value) { + switch (value) { + case 0: + return kNoAlignment; + case 1: + return k64BitAlign; + case 2: + return k128BitAlign; + case 3: + return k256BitAlign; + default: + break; + } + return kBadAlignment; +} + + +void Disassembler::adc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdc, kArithmetic); + os() << ToCString(kAdc) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::adcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdcs, kArithmetic); + os() << ToCString(kAdcs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::add(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdd, kArithmetic); + os() << ToCString(kAdd) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::add(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kAdd, kArithmetic); + os() << ToCString(kAdd) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::adds(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAdds, kArithmetic); + os() << ToCString(kAdds) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + 
+void Disassembler::adds(Register rd, const Operand& operand) { + os().SetCurrentInstruction(kAdds, kArithmetic); + os() << ToCString(kAdds) << " " << rd << ", " << operand; +} + +void Disassembler::addw(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAddw, kArithmetic); + os() << ToCString(kAddw) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::adr(Condition cond, + EncodingSize size, + Register rd, + Location* location) { + os().SetCurrentInstruction(kAdr, kAddress); + os() << ToCString(kAdr) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " + << PrintLabel(kAnyLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::and_(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAnd, kBitwise); + os() << ToCString(kAnd) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::ands(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kAnds, kBitwise); + os() << ToCString(kAnds) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::asr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kAsr, kShift); + os() << ToCString(kAsr) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::asrs(Condition cond, + EncodingSize size, + Register rd, + Register 
rm, + const Operand& operand) { + os().SetCurrentInstruction(kAsrs, kShift); + os() << ToCString(kAsrs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::b(Condition cond, EncodingSize size, Location* location) { + os().SetCurrentInstruction(kB, kAddress | kBranch); + os() << ToCString(kB) << ConditionPrinter(it_block_, cond) << size << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::bfc(Condition cond, + Register rd, + uint32_t lsb, + uint32_t width) { + os().SetCurrentInstruction(kBfc, kShift); + os() << ToCString(kBfc) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(lsb) << ", " << ImmediatePrinter(width); +} + +void Disassembler::bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + os().SetCurrentInstruction(kBfi, kShift); + os() << ToCString(kBfi) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << ImmediatePrinter(lsb) << ", " + << ImmediatePrinter(width); +} + +void Disassembler::bic(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kBic, kBitwise); + os() << ToCString(kBic) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::bics(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kBics, kBitwise); + os() << ToCString(kBics) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::bkpt(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kBkpt, kSystem); + os() << 
ToCString(kBkpt) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::bl(Condition cond, Location* location) { + os().SetCurrentInstruction(kBl, kAddress | kBranch); + os() << ToCString(kBl) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::blx(Condition cond, Location* location) { + os().SetCurrentInstruction(kBlx, kAddress | kBranch); + os() << ToCString(kBlx) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::blx(Condition cond, Register rm) { + os().SetCurrentInstruction(kBlx, kAddress | kBranch); + os() << ToCString(kBlx) << ConditionPrinter(it_block_, cond) << " " << rm; +} + +void Disassembler::bx(Condition cond, Register rm) { + os().SetCurrentInstruction(kBx, kAddress | kBranch); + os() << ToCString(kBx) << ConditionPrinter(it_block_, cond) << " " << rm; +} + +void Disassembler::bxj(Condition cond, Register rm) { + os().SetCurrentInstruction(kBxj, kAddress | kBranch); + os() << ToCString(kBxj) << ConditionPrinter(it_block_, cond) << " " << rm; +} + +void Disassembler::cbnz(Register rn, Location* location) { + os().SetCurrentInstruction(kCbnz, kAddress | kBranch); + os() << ToCString(kCbnz) << " " << rn << ", " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::cbz(Register rn, Location* location) { + os().SetCurrentInstruction(kCbz, kAddress | kBranch); + os() << ToCString(kCbz) << " " << rn << ", " + << PrintLabel(kCodeLocation, location, GetCodeAddress()); +} + +void Disassembler::clrex(Condition cond) { + os().SetCurrentInstruction(kClrex, kNoAttribute); + os() << ToCString(kClrex) << ConditionPrinter(it_block_, cond); +} + +void Disassembler::clz(Condition cond, Register rd, Register rm) { + os().SetCurrentInstruction(kClz, kNoAttribute); + os() << ToCString(kClz) << ConditionPrinter(it_block_, cond) << " " << rd 
+ << ", " << rm; +} + +void Disassembler::cmn(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kCmn, kArithmetic); + os() << ToCString(kCmn) << ConditionPrinter(it_block_, cond) << size << " " + << rn << ", " << operand; +} + +void Disassembler::cmp(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kCmp, kArithmetic); + os() << ToCString(kCmp) << ConditionPrinter(it_block_, cond) << size << " " + << rn << ", " << operand; +} + +void Disassembler::crc32b(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32b, kNoAttribute); + os() << ToCString(kCrc32b) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32cb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32cb, kNoAttribute); + os() << ToCString(kCrc32cb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32ch(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32ch, kNoAttribute); + os() << ToCString(kCrc32ch) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32cw(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32cw, kNoAttribute); + os() << ToCString(kCrc32cw) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32h(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32h, kNoAttribute); + os() << ToCString(kCrc32h) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::crc32w(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kCrc32w, kNoAttribute); + os() 
<< ToCString(kCrc32w) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void Disassembler::dmb(Condition cond, MemoryBarrier option) { + os().SetCurrentInstruction(kDmb, kNoAttribute); + os() << ToCString(kDmb) << ConditionPrinter(it_block_, cond) << " " << option; +} + +void Disassembler::dsb(Condition cond, MemoryBarrier option) { + os().SetCurrentInstruction(kDsb, kNoAttribute); + os() << ToCString(kDsb) << ConditionPrinter(it_block_, cond) << " " << option; +} + +void Disassembler::eor(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kEor, kBitwise); + os() << ToCString(kEor) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::eors(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kEors, kBitwise); + os() << ToCString(kEors) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kFldmdbx, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFldmdbx) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kFldmiax, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFldmiax) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + 
os().SetCurrentInstruction(kFstmdbx, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFstmdbx) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::fstmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kFstmiax, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kFstmiax) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << dreglist; +} + +void Disassembler::hlt(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kHlt, kSystem); + os() << ToCString(kHlt) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::hvc(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kHvc, kSystem); + os() << ToCString(kHvc) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::isb(Condition cond, MemoryBarrier option) { + os().SetCurrentInstruction(kIsb, kNoAttribute); + os() << ToCString(kIsb) << ConditionPrinter(it_block_, cond) << " " << option; +} + +void Disassembler::it(Condition cond, uint16_t mask) { + os().SetCurrentInstruction(kIt, kNoAttribute); + os() << ToCString(kIt); + int count; + if ((mask & 0x1) != 0) { + count = 3; + } else if ((mask & 0x2) != 0) { + count = 2; + } else if ((mask & 0x4) != 0) { + count = 1; + } else { + count = 0; + } + uint16_t tmp = 0x8; + uint16_t ref = (cond.GetCondition() & 0x1) << 3; + while (count-- > 0) { + os() << (((mask & tmp) == ref) ? 
"t" : "e"); + tmp >>= 1; + ref >>= 1; + } + if (cond.Is(al)) { + os() << " al"; + } else { + os() << " " << cond; + } +} + +void Disassembler::lda(Condition cond, Register rt, const MemOperand& operand) { + os().SetCurrentInstruction(kLda, kAddress | kLoadStore); + os() << ToCString(kLda) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldab(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdab, kAddress | kLoadStore); + os() << ToCString(kLdab) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldaex(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaex, kAddress | kLoadStore); + os() << ToCString(kLdaex) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldaexb(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaexb, kAddress | kLoadStore); + os() << ToCString(kLdaexb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaexd, kAddress | kLoadStore); + os() << ToCString(kLdaexd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kLoadDoubleWordLocation, operand); +} + +void Disassembler::ldaexh(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdaexh, kAddress | kLoadStore); + os() << ToCString(kLdaexh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldah(Condition cond, + Register rt, + const MemOperand& operand) { + 
os().SetCurrentInstruction(kLdah, kAddress | kLoadStore); + os() << ToCString(kLdah) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdm, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdm) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmda, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmda) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmdb, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmdb) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmea, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmea) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmed, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmed) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmfa, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmfa) << ConditionPrinter(it_block_, cond) << " " << rn + << 
write_back << ", " << registers; +} + +void Disassembler::ldmfd(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmfd, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmfd) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kLdmib, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kLdmib) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::ldr(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdr, kAddress | kLoadStore); + os() << ToCString(kLdr) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldr(Condition cond, + EncodingSize size, + Register rt, + Location* location) { + os().SetCurrentInstruction(kLdr, kAddress | kLoadStore); + os() << ToCString(kLdr) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " + << PrintLabel(kLoadWordLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrb, kAddress | kLoadStore); + os() << ToCString(kLdrb) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldrb(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrb, kAddress | kLoadStore); + os() << ToCString(kLdrb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " + << PrintLabel(kLoadByteLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrd(Condition cond, + Register rt, + Register rt2, + 
const MemOperand& operand) { + os().SetCurrentInstruction(kLdrd, kAddress | kLoadStore); + os() << ToCString(kLdrd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kLoadDoubleWordLocation, operand); +} + +void Disassembler::ldrd(Condition cond, + Register rt, + Register rt2, + Location* location) { + os().SetCurrentInstruction(kLdrd, kAddress | kLoadStore); + os() << ToCString(kLdrd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintLabel(kLoadDoubleWordLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrex(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrex, kAddress | kLoadStore); + os() << ToCString(kLdrex) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadWordLocation, operand); +} + +void Disassembler::ldrexb(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrexb, kAddress | kLoadStore); + os() << ToCString(kLdrexb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadByteLocation, operand); +} + +void Disassembler::ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrexd, kAddress | kLoadStore); + os() << ToCString(kLdrexd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kLoadDoubleWordLocation, operand); +} + +void Disassembler::ldrexh(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrexh, kAddress | kLoadStore); + os() << ToCString(kLdrexh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldrh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrh, kAddress | kLoadStore); + os() << ToCString(kLdrh) << 
ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadHalfWordLocation, operand); +} + +void Disassembler::ldrh(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrh, kAddress | kLoadStore); + os() << ToCString(kLdrh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " + << PrintLabel(kLoadHalfWordLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrsb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrsb, kAddress | kLoadStore); + os() << ToCString(kLdrsb) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadSignedByteLocation, operand); +} + +void Disassembler::ldrsb(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrsb, kAddress | kLoadStore); + os() << ToCString(kLdrsb) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " + << PrintLabel(kLoadSignedByteLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::ldrsh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kLdrsh, kAddress | kLoadStore); + os() << ToCString(kLdrsh) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kLoadSignedHalfWordLocation, operand); +} + +void Disassembler::ldrsh(Condition cond, Register rt, Location* location) { + os().SetCurrentInstruction(kLdrsh, kAddress | kLoadStore); + os() << ToCString(kLdrsh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintLabel(kLoadSignedHalfWordLocation, + location, + GetCodeAddress() & ~3); +} + +void Disassembler::lsl(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsl, kShift); + os() << ToCString(kLsl) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << 
rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::lsls(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsls, kShift); + os() << ToCString(kLsls) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::lsr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsr, kShift); + os() << ToCString(kLsr) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::lsrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kLsrs, kShift); + os() << ToCString(kLsrs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::mla( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kMla, kArithmetic); + os() << ToCString(kMla) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::mlas( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kMlas, kArithmetic); + os() << ToCString(kMlas) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::mls( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kMls, kArithmetic); + os() << ToCString(kMls) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::mov(Condition cond, + 
EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMov, kNoAttribute); + os() << ToCString(kMov) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::movs(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMovs, kNoAttribute); + os() << ToCString(kMovs) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::movt(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kMovt, kNoAttribute); + os() << ToCString(kMovt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::movw(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kMovw, kNoAttribute); + os() << ToCString(kMovw) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::mrs(Condition cond, Register rd, SpecialRegister spec_reg) { + os().SetCurrentInstruction(kMrs, kNoAttribute); + os() << ToCString(kMrs) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << spec_reg; +} + +void Disassembler::msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) { + os().SetCurrentInstruction(kMsr, kNoAttribute); + os() << ToCString(kMsr) << ConditionPrinter(it_block_, cond) << " " + << spec_reg << ", " << operand; +} + +void Disassembler::mul( + Condition cond, EncodingSize size, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kMul, kArithmetic); + os() << ToCString(kMul) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::muls(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kMuls, kArithmetic); + os() << ToCString(kMuls) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm; +} + +void 
Disassembler::mvn(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMvn, kNoAttribute); + os() << ToCString(kMvn) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::mvns(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kMvns, kNoAttribute); + os() << ToCString(kMvns) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << operand; +} + +void Disassembler::nop(Condition cond, EncodingSize size) { + os().SetCurrentInstruction(kNop, kNoAttribute); + os() << ToCString(kNop) << ConditionPrinter(it_block_, cond) << size; +} + +void Disassembler::orn(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrn, kBitwise); + os() << ToCString(kOrn) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::orns(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrns, kBitwise); + os() << ToCString(kOrns) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::orr(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrr, kBitwise); + os() << ToCString(kOrr) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::orrs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kOrrs, kBitwise); + os() << ToCString(kOrrs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + 
if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::pkhbt(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kPkhbt, kNoAttribute); + os() << ToCString(kPkhbt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::pkhtb(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kPkhtb, kNoAttribute); + os() << ToCString(kPkhtb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::pld(Condition cond, Location* location) { + os().SetCurrentInstruction(kPld, kAddress); + os() << ToCString(kPld) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kDataLocation, location, GetCodeAddress() & ~3); +} + +void Disassembler::pld(Condition cond, const MemOperand& operand) { + os().SetCurrentInstruction(kPld, kAddress); + os() << ToCString(kPld) << ConditionPrinter(it_block_, cond) << " " + << PrintMemOperand(kDataLocation, operand); +} + +void Disassembler::pldw(Condition cond, const MemOperand& operand) { + os().SetCurrentInstruction(kPldw, kAddress); + os() << ToCString(kPldw) << ConditionPrinter(it_block_, cond) << " " + << PrintMemOperand(kDataLocation, operand); +} + +void Disassembler::pli(Condition cond, const MemOperand& operand) { + os().SetCurrentInstruction(kPli, kAddress); + os() << ToCString(kPli) << ConditionPrinter(it_block_, cond) << " " + << PrintMemOperand(kCodeLocation, operand); +} + +void Disassembler::pli(Condition cond, Location* location) { + os().SetCurrentInstruction(kPli, kAddress); + os() << ToCString(kPli) << ConditionPrinter(it_block_, cond) << " " + << PrintLabel(kCodeLocation, location, GetCodeAddress() & ~3); +} + 
+void Disassembler::pop(Condition cond, + EncodingSize size, + RegisterList registers) { + os().SetCurrentInstruction(kPop, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPop) << ConditionPrinter(it_block_, cond) << size << " " + << registers; +} + +void Disassembler::pop(Condition cond, EncodingSize size, Register rt) { + os().SetCurrentInstruction(kPop, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPop) << ConditionPrinter(it_block_, cond) << size << " " + << "{" << rt << "}"; +} + +void Disassembler::push(Condition cond, + EncodingSize size, + RegisterList registers) { + os().SetCurrentInstruction(kPush, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPush) << ConditionPrinter(it_block_, cond) << size << " " + << registers; +} + +void Disassembler::push(Condition cond, EncodingSize size, Register rt) { + os().SetCurrentInstruction(kPush, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kPush) << ConditionPrinter(it_block_, cond) << size << " " + << "{" << rt << "}"; +} + +void Disassembler::qadd(Condition cond, Register rd, Register rm, Register rn) { + os().SetCurrentInstruction(kQadd, kArithmetic); + os() << ToCString(kQadd) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kQadd16, kArithmetic); + os() << ToCString(kQadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kQadd8, kArithmetic); + os() << ToCString(kQadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void 
Disassembler::qasx(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kQasx, kArithmetic); + os() << ToCString(kQasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qdadd(Condition cond, + Register rd, + Register rm, + Register rn) { + os().SetCurrentInstruction(kQdadd, kArithmetic); + os() << ToCString(kQdadd) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qdsub(Condition cond, + Register rd, + Register rm, + Register rn) { + os().SetCurrentInstruction(kQdsub, kArithmetic); + os() << ToCString(kQdsub) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qsax(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kQsax, kArithmetic); + os() << ToCString(kQsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qsub(Condition cond, Register rd, Register rm, Register rn) { + os().SetCurrentInstruction(kQsub, kArithmetic); + os() << ToCString(kQsub) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::qsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kQsub16, kArithmetic); + os() << ToCString(kQsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::qsub8(Condition cond, + Register rd, + Register rn, + 
Register rm) { + os().SetCurrentInstruction(kQsub8, kArithmetic); + os() << ToCString(kQsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::rbit(Condition cond, Register rd, Register rm) { + os().SetCurrentInstruction(kRbit, kNoAttribute); + os() << ToCString(kRbit) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rm; +} + +void Disassembler::rev(Condition cond, + EncodingSize size, + Register rd, + Register rm) { + os().SetCurrentInstruction(kRev, kNoAttribute); + os() << ToCString(kRev) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rm; +} + +void Disassembler::rev16(Condition cond, + EncodingSize size, + Register rd, + Register rm) { + os().SetCurrentInstruction(kRev16, kNoAttribute); + os() << ToCString(kRev16) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rm; +} + +void Disassembler::revsh(Condition cond, + EncodingSize size, + Register rd, + Register rm) { + os().SetCurrentInstruction(kRevsh, kNoAttribute); + os() << ToCString(kRevsh) << ConditionPrinter(it_block_, cond) << size << " " + << rd << ", " << rm; +} + +void Disassembler::ror(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kRor, kShift); + os() << ToCString(kRor) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::rors(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand) { + os().SetCurrentInstruction(kRors, kShift); + os() << ToCString(kRors) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::rrx(Condition cond, Register 
rd, Register rm) { + os().SetCurrentInstruction(kRrx, kShift); + os() << ToCString(kRrx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm; +} + +void Disassembler::rrxs(Condition cond, Register rd, Register rm) { + os().SetCurrentInstruction(kRrxs, kShift); + os() << ToCString(kRrxs) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm; +} + +void Disassembler::rsb(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRsb, kArithmetic); + os() << ToCString(kRsb) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::rsbs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRsbs, kArithmetic); + os() << ToCString(kRsbs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::rsc(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRsc, kArithmetic); + os() << ToCString(kRsc) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::rscs(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kRscs, kArithmetic); + os() << ToCString(kRscs) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sadd16(Condition cond, + Register rd, + Register rn, + 
Register rm) { + os().SetCurrentInstruction(kSadd16, kArithmetic); + os() << ToCString(kSadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSadd8, kArithmetic); + os() << ToCString(kSadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sasx(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kSasx, kArithmetic); + os() << ToCString(kSasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sbc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSbc, kArithmetic); + os() << ToCString(kSbc) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sbcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSbcs, kArithmetic); + os() << ToCString(kSbcs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + os().SetCurrentInstruction(kSbfx, kShift); + os() << ToCString(kSbfx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << ImmediatePrinter(lsb) << ", " + << ImmediatePrinter(width); +} + +void Disassembler::sdiv(Condition cond, Register rd, 
Register rn, Register rm) { + os().SetCurrentInstruction(kSdiv, kArithmetic); + os() << ToCString(kSdiv) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::sel(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kSel, kNoAttribute); + os() << ToCString(kSel) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShadd16, kArithmetic); + os() << ToCString(kShadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShadd8, kArithmetic); + os() << ToCString(kShadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shasx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShasx, kArithmetic); + os() << ToCString(kShasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shsax(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShsax, kArithmetic); + os() << ToCString(kShsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + 
os().SetCurrentInstruction(kShsub16, kArithmetic); + os() << ToCString(kShsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::shsub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kShsub8, kArithmetic); + os() << ToCString(kShsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlabb, kArithmetic); + os() << ToCString(kSmlabb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlabt, kArithmetic); + os() << ToCString(kSmlabt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlad, kArithmetic); + os() << ToCString(kSmlad) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smladx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmladx, kArithmetic); + os() << ToCString(kSmladx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlal, kArithmetic); + os() << ToCString(kSmlal) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void 
Disassembler::smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlalbb, kArithmetic); + os() << ToCString(kSmlalbb) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlalbt, kArithmetic); + os() << ToCString(kSmlalbt) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlald, kArithmetic); + os() << ToCString(kSmlald) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlaldx, kArithmetic); + os() << ToCString(kSmlaldx) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlals, kArithmetic); + os() << ToCString(kSmlals) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlaltb, kArithmetic); + os() << ToCString(kSmlaltb) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlaltt, kArithmetic); + os() << ToCString(kSmlaltt) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi 
<< ", " << rn << ", " << rm; +} + +void Disassembler::smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlatb, kArithmetic); + os() << ToCString(kSmlatb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlatt, kArithmetic); + os() << ToCString(kSmlatt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlawb, kArithmetic); + os() << ToCString(kSmlawb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlawt, kArithmetic); + os() << ToCString(kSmlawt) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlsd, kArithmetic); + os() << ToCString(kSmlsd) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmlsdx, kArithmetic); + os() << ToCString(kSmlsdx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlsld, kArithmetic); + os() << ToCString(kSmlsld) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << 
rm; +} + +void Disassembler::smlsldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmlsldx, kArithmetic); + os() << ToCString(kSmlsldx) << ConditionPrinter(it_block_, cond) << " " + << rdlo << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmla, kArithmetic); + os() << ToCString(kSmmla) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmlar, kArithmetic); + os() << ToCString(kSmmlar) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmls, kArithmetic); + os() << ToCString(kSmmls) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kSmmlsr, kArithmetic); + os() << ToCString(kSmmlsr) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::smmul(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmmul, kArithmetic); + os() << ToCString(kSmmul) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smmulr(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmmulr, kArithmetic); + os() << ToCString(kSmmulr) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || 
!use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smuad(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmuad, kArithmetic); + os() << ToCString(kSmuad) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smuadx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmuadx, kArithmetic); + os() << ToCString(kSmuadx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulbb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulbb, kArithmetic); + os() << ToCString(kSmulbb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulbt(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulbt, kArithmetic); + os() << ToCString(kSmulbt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmull, kArithmetic); + os() << ToCString(kSmull) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kSmulls, kArithmetic); + os() << ToCString(kSmulls) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void 
Disassembler::smultb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmultb, kArithmetic); + os() << ToCString(kSmultb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smultt(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmultt, kArithmetic); + os() << ToCString(kSmultt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulwb(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulwb, kArithmetic); + os() << ToCString(kSmulwb) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smulwt(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmulwt, kArithmetic); + os() << ToCString(kSmulwt) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smusd(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmusd, kArithmetic); + os() << ToCString(kSmusd) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::smusdx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSmusdx, kArithmetic); + os() << ToCString(kSmusdx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ssat(Condition cond, 
+ Register rd, + uint32_t imm, + const Operand& operand) { + os().SetCurrentInstruction(kSsat, kArithmetic); + os() << ToCString(kSsat) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << operand; +} + +void Disassembler::ssat16(Condition cond, + Register rd, + uint32_t imm, + Register rn) { + os().SetCurrentInstruction(kSsat16, kArithmetic); + os() << ToCString(kSsat16) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << rn; +} + +void Disassembler::ssax(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kSsax, kArithmetic); + os() << ToCString(kSsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ssub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSsub16, kArithmetic); + os() << ToCString(kSsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ssub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kSsub8, kArithmetic); + os() << ToCString(kSsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::stl(Condition cond, Register rt, const MemOperand& operand) { + os().SetCurrentInstruction(kStl, kAddress | kLoadStore); + os() << ToCString(kStl) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void Disassembler::stlb(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlb, kAddress | kLoadStore); + os() << ToCString(kStlb) << ConditionPrinter(it_block_, cond) << " " << rt + 
<< ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlex, kAddress | kLoadStore); + os() << ToCString(kStlex) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void Disassembler::stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlexb, kAddress | kLoadStore); + os() << ToCString(kStlexb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlexd, kAddress | kLoadStore); + os() << ToCString(kStlexd) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << rt2 << ", " + << PrintMemOperand(kStoreDoubleWordLocation, operand); +} + +void Disassembler::stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlexh, kAddress | kLoadStore); + os() << ToCString(kStlexh) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " + << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::stlh(Condition cond, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStlh, kAddress | kLoadStore); + os() << ToCString(kStlh) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::stm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStm, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStm) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << 
registers; +} + +void Disassembler::stmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmda, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmda) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmdb(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmdb, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmdb) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::stmea(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmea, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmea) << ConditionPrinter(it_block_, cond) << size << " " + << rn << write_back << ", " << registers; +} + +void Disassembler::stmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmed, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmed) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmfa, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmfa) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + os().SetCurrentInstruction(kStmfd, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmfd) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + 
os().SetCurrentInstruction(kStmib, kLoadStore | kLoadStoreMultiple); + os() << ToCString(kStmib) << ConditionPrinter(it_block_, cond) << " " << rn + << write_back << ", " << registers; +} + +void Disassembler::str(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStr, kAddress | kLoadStore); + os() << ToCString(kStr) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void Disassembler::strb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrb, kAddress | kLoadStore); + os() << ToCString(kStrb) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrd, kAddress | kLoadStore); + os() << ToCString(kStrd) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " + << PrintMemOperand(kStoreDoubleWordLocation, operand); +} + +void Disassembler::strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrex, kAddress | kLoadStore); + os() << ToCString(kStrex) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreWordLocation, operand); +} + +void Disassembler::strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrexb, kAddress | kLoadStore); + os() << ToCString(kStrexb) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << PrintMemOperand(kStoreByteLocation, operand); +} + +void Disassembler::strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrexd, kAddress | kLoadStore); + os() << 
ToCString(kStrexd) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " << rt2 << ", " + << PrintMemOperand(kStoreDoubleWordLocation, operand); +} + +void Disassembler::strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrexh, kAddress | kLoadStore); + os() << ToCString(kStrexh) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rt << ", " + << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::strh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand) { + os().SetCurrentInstruction(kStrh, kAddress | kLoadStore); + os() << ToCString(kStrh) << ConditionPrinter(it_block_, cond) << size << " " + << rt << ", " << PrintMemOperand(kStoreHalfWordLocation, operand); +} + +void Disassembler::sub(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSub, kArithmetic); + os() << ToCString(kSub) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sub(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kSub, kArithmetic); + os() << ToCString(kSub) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << operand; +} + +void Disassembler::subs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSubs, kArithmetic); + os() << ToCString(kSubs) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::subs(Register rd, const Operand& operand) { + os().SetCurrentInstruction(kSubs, kArithmetic); + os() << ToCString(kSubs) << " " << rd << ", " << operand; +} + +void Disassembler::subw(Condition 
cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSubw, kArithmetic); + os() << ToCString(kSubw) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::svc(Condition cond, uint32_t imm) { + os().SetCurrentInstruction(kSvc, kSystem); + os() << ToCString(kSvc) << ConditionPrinter(it_block_, cond) << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::sxtab(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSxtab, kArithmetic); + os() << ToCString(kSxtab) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSxtab16, kArithmetic); + os() << ToCString(kSxtab16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sxtah(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kSxtah, kArithmetic); + os() << ToCString(kSxtah) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::sxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kSxtb, kArithmetic); + os() << ToCString(kSxtb) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::sxtb16(Condition cond, Register rd, const Operand& operand) { + 
os().SetCurrentInstruction(kSxtb16, kArithmetic); + os() << ToCString(kSxtb16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::sxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kSxth, kArithmetic); + os() << ToCString(kSxth) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::tbb(Condition cond, Register rn, Register rm) { + os().SetCurrentInstruction(kTbb, kBranch); + os() << ToCString(kTbb) << ConditionPrinter(it_block_, cond) << " " + << MemOperand(rn, rm); +} + +void Disassembler::tbh(Condition cond, Register rn, Register rm) { + os().SetCurrentInstruction(kTbh, kBranch); + os() << ToCString(kTbh) << ConditionPrinter(it_block_, cond) << " " + << MemOperand(rn, plus, rm, LSL, 1); +} + +void Disassembler::teq(Condition cond, Register rn, const Operand& operand) { + os().SetCurrentInstruction(kTeq, kBitwise); + os() << ToCString(kTeq) << ConditionPrinter(it_block_, cond) << " " << rn + << ", " << operand; +} + +void Disassembler::tst(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kTst, kBitwise); + os() << ToCString(kTst) << ConditionPrinter(it_block_, cond) << size << " " + << rn << ", " << operand; +} + +void Disassembler::uadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUadd16, kArithmetic); + os() << ToCString(kUadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUadd8, 
kArithmetic); + os() << ToCString(kUadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uasx(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kUasx, kArithmetic); + os() << ToCString(kUasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + os().SetCurrentInstruction(kUbfx, kShift); + os() << ToCString(kUbfx) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << ImmediatePrinter(lsb) << ", " + << ImmediatePrinter(width); +} + +void Disassembler::udf(Condition cond, EncodingSize size, uint32_t imm) { + os().SetCurrentInstruction(kUdf, kNoAttribute); + os() << ToCString(kUdf) << ConditionPrinter(it_block_, cond) << size << " " + << RawImmediatePrinter(imm); +} + +void Disassembler::udiv(Condition cond, Register rd, Register rn, Register rm) { + os().SetCurrentInstruction(kUdiv, kArithmetic); + os() << ToCString(kUdiv) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhadd16, kArithmetic); + os() << ToCString(kUhadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhadd8, kArithmetic); + os() << ToCString(kUhadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd 
<< ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhasx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhasx, kArithmetic); + os() << ToCString(kUhasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhsax(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhsax, kArithmetic); + os() << ToCString(kUhsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhsub16, kArithmetic); + os() << ToCString(kUhsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uhsub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUhsub8, kArithmetic); + os() << ToCString(kUhsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmaal, kArithmetic); + os() << ToCString(kUmaal) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmlal, kArithmetic); + os() << ToCString(kUmlal) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umlals( + Condition cond, Register rdlo, Register 
rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmlals, kArithmetic); + os() << ToCString(kUmlals) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmull, kArithmetic); + os() << ToCString(kUmull) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + os().SetCurrentInstruction(kUmulls, kArithmetic); + os() << ToCString(kUmulls) << ConditionPrinter(it_block_, cond) << " " << rdlo + << ", " << rdhi << ", " << rn << ", " << rm; +} + +void Disassembler::uqadd16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqadd16, kArithmetic); + os() << ToCString(kUqadd16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqadd8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqadd8, kArithmetic); + os() << ToCString(kUqadd8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqasx(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqasx, kArithmetic); + os() << ToCString(kUqasx) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqsax(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqsax, kArithmetic); + os() << ToCString(kUqsax) << ConditionPrinter(it_block_, cond); + os() << " "; 
+ if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqsub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqsub16, kArithmetic); + os() << ToCString(kUqsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uqsub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUqsub8, kArithmetic); + os() << ToCString(kUqsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::usad8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUsad8, kArithmetic); + os() << ToCString(kUsad8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + os().SetCurrentInstruction(kUsada8, kArithmetic); + os() << ToCString(kUsada8) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << rn << ", " << rm << ", " << ra; +} + +void Disassembler::usat(Condition cond, + Register rd, + uint32_t imm, + const Operand& operand) { + os().SetCurrentInstruction(kUsat, kArithmetic); + os() << ToCString(kUsat) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << operand; +} + +void Disassembler::usat16(Condition cond, + Register rd, + uint32_t imm, + Register rn) { + os().SetCurrentInstruction(kUsat16, kArithmetic); + os() << ToCString(kUsat16) << ConditionPrinter(it_block_, cond) << " " << rd + << ", " << ImmediatePrinter(imm) << ", " << rn; +} + +void Disassembler::usax(Condition cond, Register rd, Register rn, 
Register rm) { + os().SetCurrentInstruction(kUsax, kArithmetic); + os() << ToCString(kUsax) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::usub16(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUsub16, kArithmetic); + os() << ToCString(kUsub16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::usub8(Condition cond, + Register rd, + Register rn, + Register rm) { + os().SetCurrentInstruction(kUsub8, kArithmetic); + os() << ToCString(kUsub8) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::uxtab(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kUxtab, kArithmetic); + os() << ToCString(kUxtab) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kUxtab16, kArithmetic); + os() << ToCString(kUxtab16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::uxtah(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + os().SetCurrentInstruction(kUxtah, kArithmetic); + os() << ToCString(kUxtah) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::uxtb(Condition cond, + EncodingSize size, + 
Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kUxtb, kArithmetic); + os() << ToCString(kUxtb) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::uxtb16(Condition cond, Register rd, const Operand& operand) { + os().SetCurrentInstruction(kUxtb16, kArithmetic); + os() << ToCString(kUxtb16) << ConditionPrinter(it_block_, cond); + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::uxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand) { + os().SetCurrentInstruction(kUxth, kArithmetic); + os() << ToCString(kUxth) << ConditionPrinter(it_block_, cond) << size; + os() << " "; + if (!rd.Is(operand.GetBaseRegister()) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << operand; +} + +void Disassembler::vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaba, kFpNeon); + os() << ToCString(kVaba) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVaba, kFpNeon); + os() << ToCString(kVaba) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVabal, kFpNeon); + os() << ToCString(kVabal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVabd, kFpNeon); + os() << ToCString(kVabd) << ConditionPrinter(it_block_, 
cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVabd, kFpNeon); + os() << ToCString(kVabd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVabdl, kFpNeon); + os() << ToCString(kVabdl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vabs(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVabs, kFpNeon); + os() << ToCString(kVabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vabs(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVabs, kFpNeon); + os() << ToCString(kVabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vabs(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVabs, kFpNeon); + os() << ToCString(kVabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVacge, kFpNeon); + os() << ToCString(kVacge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVacge, kFpNeon); + os() << ToCString(kVacge) << 
ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVacgt, kFpNeon); + os() << ToCString(kVacgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVacgt, kFpNeon); + os() << ToCString(kVacgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVacle, kFpNeon); + os() << ToCString(kVacle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVacle, kFpNeon); + os() << ToCString(kVacle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaclt, kFpNeon); + os() << ToCString(kVaclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVaclt, 
kFpNeon); + os() << ToCString(kVaclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVadd, kFpNeon); + os() << ToCString(kVadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVadd, kFpNeon); + os() << ToCString(kVadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVadd, kFpNeon); + os() << ToCString(kVadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVaddhn, kFpNeon); + os() << ToCString(kVaddhn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaddl, kFpNeon); + os() << ToCString(kVaddl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVaddw, kFpNeon); + os() << ToCString(kVaddw) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if 
(!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + os().SetCurrentInstruction(kVand, kFpNeon); + os() << ToCString(kVand) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVand, kFpNeon); + os() << ToCString(kVand) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + os().SetCurrentInstruction(kVbic, kFpNeon); + os() << ToCString(kVbic) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVbic, kFpNeon); + os() << ToCString(kVbic) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVbif, kFpNeon); + os() << ToCString(kVbif) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbif( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + 
os().SetCurrentInstruction(kVbif, kFpNeon); + os() << ToCString(kVbif) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVbit, kFpNeon); + os() << ToCString(kVbit) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVbit, kFpNeon); + os() << ToCString(kVbit) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVbsl, kFpNeon); + os() << ToCString(kVbsl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVbsl, kFpNeon); + os() << ToCString(kVbsl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vceq(Condition cond, + DataType dt, + 
QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVceq, kFpNeon); + os() << ToCString(kVceq) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + 
os() << rn << ", " << rm; +} + +void Disassembler::vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVcge, kFpNeon); + os() << ToCString(kVcge) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() << ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() << ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() << ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVcgt, kFpNeon); + os() << ToCString(kVcgt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() 
<< " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVcle, kFpNeon); + os() << ToCString(kVcle) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vcls(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcls, kFpNeon); + os() << ToCString(kVcls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcls(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcls, kFpNeon); + os() << ToCString(kVcls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void 
Disassembler::vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVclt, kFpNeon); + os() << ToCString(kVclt) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vclz(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVclz, kFpNeon); + os() << ToCString(kVclz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vclz(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVclz, kFpNeon); + os() << ToCString(kVclz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcmp(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + os().SetCurrentInstruction(kVcmp, kFpNeon); + os() << ToCString(kVcmp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vcmp(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVcmp, kFpNeon); + os() << ToCString(kVcmp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << 
", " << operand; +} + +void Disassembler::vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + os().SetCurrentInstruction(kVcmpe, kFpNeon); + os() << ToCString(kVcmpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVcmpe, kFpNeon); + os() << ToCString(kVcmpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vcnt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcnt, kFpNeon); + os() << ToCString(kVcnt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcnt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcnt, kFpNeon); + os() << ToCString(kVcnt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm << ", " << SignedImmediatePrinter(fbits); +} + +void Disassembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits) { + 
os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm << ", " << SignedImmediatePrinter(fbits); +} + +void Disassembler::vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm << ", " << SignedImmediatePrinter(fbits); +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvt, kFpNeon); + os() << ToCString(kVcvt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 
<< dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvta(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvta, kFpNeon); + os() << ToCString(kVcvta) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtb, kFpNeon); + os() << ToCString(kVcvtb) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtb, kFpNeon); + os() << ToCString(kVcvtb) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvtb, kFpNeon); + os() << ToCString(kVcvtb) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + 
os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtm(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtm, kFpNeon); + os() << ToCString(kVcvtm) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtn(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtn, kFpNeon); + os() << ToCString(kVcvtn) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " " << rd << ", " << rm; +} + +void Disassembler::vcvtp(DataType dt1, + DataType dt2, + SRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVcvtp, kFpNeon); + os() << ToCString(kVcvtp) << dt1 << dt2 << " 
" << rd << ", " << rm; +} + +void Disassembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtr, kFpNeon); + os() << ToCString(kVcvtr) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvtr, kFpNeon); + os() << ToCString(kVcvtr) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtt, kFpNeon); + os() << ToCString(kVcvtt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVcvtt, kFpNeon); + os() << ToCString(kVcvtt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVcvtt, kFpNeon); + os() << ToCString(kVcvtt) << ConditionPrinter(it_block_, cond) << dt1 << dt2 + << " " << rd << ", " << rm; +} + +void Disassembler::vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVdiv, kFpNeon); + os() << ToCString(kVdiv) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVdiv, kFpNeon); + os() << ToCString(kVdiv) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << 
rn << ", " << rm; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + QRegister rd, + Register rt) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rt; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + DRegister rd, + Register rt) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rt; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + DRegister rd, + DRegisterLane rm) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vdup(Condition cond, + DataType dt, + QRegister rd, + DRegisterLane rm) { + os().SetCurrentInstruction(kVdup, kFpNeon); + os() << ToCString(kVdup) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVeor, kFpNeon); + os() << ToCString(kVeor) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVeor, kFpNeon); + os() << ToCString(kVeor) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vext(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVext, kFpNeon); + os() << ToCString(kVext) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; 
+ } + os() << rn << ", " << rm << ", " << operand; +} + +void Disassembler::vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVext, kFpNeon); + os() << ToCString(kVext) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm << ", " << operand; +} + +void Disassembler::vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfma, kFpNeon); + os() << ToCString(kVfma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVfma, kFpNeon); + os() << ToCString(kVfma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfma, kFpNeon); + os() << ToCString(kVfma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfms, kFpNeon); + os() << ToCString(kVfms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfms( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVfms, kFpNeon); + os() << ToCString(kVfms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfms, kFpNeon); + os() << ToCString(kVfms) << ConditionPrinter(it_block_, cond) << dt << " " + 
<< rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfnma, kFpNeon); + os() << ToCString(kVfnma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfnma, kFpNeon); + os() << ToCString(kVfnma) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVfnms, kFpNeon); + os() << ToCString(kVfnms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVfnms, kFpNeon); + os() << ToCString(kVfnms) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVhadd, kFpNeon); + os() << ToCString(kVhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVhadd, kFpNeon); + os() << ToCString(kVhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vhsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVhsub, kFpNeon); + os() << ToCString(kVhsub) << 
ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVhsub, kFpNeon); + os() << ToCString(kVhsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld1, kFpNeon); + os() << ToCString(kVld1) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld1Location, operand); +} + +void Disassembler::vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld2, kFpNeon); + os() << ToCString(kVld2) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld2Location, operand); +} + +void Disassembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld3, kFpNeon); + os() << ToCString(kVld3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld3Location, operand); +} + +void Disassembler::vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + os().SetCurrentInstruction(kVld3, kFpNeon); + os() << ToCString(kVld3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintMemOperand(kVld3Location, operand); +} + +void Disassembler::vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVld4, kFpNeon); + os() << 
ToCString(kVld4) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVld4Location, operand); +} + +void Disassembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVldm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVldm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVldmdb, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVldmdb, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVldmia, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmia) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVldmia, + kLoadStore | 
kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVldmia) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vldr(Condition cond, + DataType dt, + DRegister rd, + Location* location) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped64) << " " << rd << ", " + << PrintLabel(kLoadDoublePrecisionLocation, + location, + GetCodeAddress() & ~3); +} + +void Disassembler::vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped64) << " " << rd << ", " + << PrintMemOperand(kLoadDoublePrecisionLocation, operand); +} + +void Disassembler::vldr(Condition cond, + DataType dt, + SRegister rd, + Location* location) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped32) << " " << rd << ", " + << PrintLabel(kLoadSinglePrecisionLocation, + location, + GetCodeAddress() & ~3); +} + +void Disassembler::vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVldr, kFpNeon); + os() << ToCString(kVldr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped32) << " " << rd << ", " + << PrintMemOperand(kLoadSinglePrecisionLocation, operand); +} + +void Disassembler::vmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmax, kFpNeon); + os() << ToCString(kVmax) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmax, kFpNeon); + 
os() << ToCString(kVmax) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmaxnm(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVmaxnm, kFpNeon); + os() << ToCString(kVmaxnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmaxnm(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm) { + os().SetCurrentInstruction(kVmaxnm, kFpNeon); + os() << ToCString(kVmaxnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmaxnm(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVmaxnm, kFpNeon); + os() << ToCString(kVmaxnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmin, kFpNeon); + os() << ToCString(kVmin) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmin, kFpNeon); + os() << ToCString(kVmin) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vminnm(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVminnm, kFpNeon); + os() << ToCString(kVminnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vminnm(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm) { + os().SetCurrentInstruction(kVminnm, kFpNeon); + os() << ToCString(kVminnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vminnm(DataType 
dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVminnm, kFpNeon); + os() << ToCString(kVminnm) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVmla, kFpNeon); + os() << ToCString(kVmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmlal, kFpNeon); + os() << ToCString(kVmlal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmlal, kFpNeon); + os() << ToCString(kVmlal) << 
ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVmls, kFpNeon); + os() << ToCString(kVmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVmlsl, kFpNeon); + os() << ToCString(kVmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmlsl, kFpNeon); + os() << ToCString(kVmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void 
Disassembler::vmov(Condition cond, Register rt, SRegister rn) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rn; +} + +void Disassembler::vmov(Condition cond, SRegister rn, Register rt) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rn + << ", " << rt; +} + +void Disassembler::vmov(Condition cond, + Register rt, + Register rt2, + DRegister rm) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " << rm; +} + +void Disassembler::vmov(Condition cond, + DRegister rm, + Register rt, + Register rt2) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rm + << ", " << rt << ", " << rt2; +} + +void Disassembler::vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << rt2 << ", " << rm << ", " << rm1; +} + +void Disassembler::vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << " " << rm + << ", " << rm1 << ", " << rt << ", " << rt2; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + DRegisterLane rd, + Register rt) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rt; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void 
Disassembler::vmov(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vmov(Condition cond, + DataType dt, + Register rt, + DRegisterLane rn) { + os().SetCurrentInstruction(kVmov, kFpNeon); + os() << ToCString(kVmov) << ConditionPrinter(it_block_, cond) << dt << " " + << rt << ", " << rn; +} + +void Disassembler::vmovl(Condition cond, + DataType dt, + QRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVmovl, kFpNeon); + os() << ToCString(kVmovl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vmovn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVmovn, kFpNeon); + os() << ToCString(kVmovn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vmrs(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg) { + os().SetCurrentInstruction(kVmrs, kFpNeon); + os() << ToCString(kVmrs) << ConditionPrinter(it_block_, cond) << " " << rt + << ", " << spec_reg; +} + +void Disassembler::vmsr(Condition cond, + SpecialFPRegister spec_reg, + Register rt) { + os().SetCurrentInstruction(kVmsr, kFpNeon); + os() << ToCString(kVmsr) << ConditionPrinter(it_block_, cond) << " " + << spec_reg << ", " << rt; +} + +void Disassembler::vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if 
(!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVmul, kFpNeon); + os() << ToCString(kVmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVmull, kFpNeon); + os() << ToCString(kVmull) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVmull, 
kFpNeon); + os() << ToCString(kVmull) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vmvn(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + os().SetCurrentInstruction(kVmvn, kFpNeon); + os() << ToCString(kVmvn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vmvn(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + os().SetCurrentInstruction(kVmvn, kFpNeon); + os() << ToCString(kVmvn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << operand; +} + +void Disassembler::vneg(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVneg, kFpNeon); + os() << ToCString(kVneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vneg(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVneg, kFpNeon); + os() << ToCString(kVneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vneg(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVneg, kFpNeon); + os() << ToCString(kVneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVnmla, kFpNeon); + os() << ToCString(kVnmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVnmla, kFpNeon); + os() << ToCString(kVnmla) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, 
SRegister rm) { + os().SetCurrentInstruction(kVnmls, kFpNeon); + os() << ToCString(kVnmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVnmls, kFpNeon); + os() << ToCString(kVnmls) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVnmul, kFpNeon); + os() << ToCString(kVnmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVnmul, kFpNeon); + os() << ToCString(kVnmul) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + os().SetCurrentInstruction(kVorn, kFpNeon); + os() << ToCString(kVorn) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVorn, kFpNeon); + os() << ToCString(kVorn) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + 
os().SetCurrentInstruction(kVorr, kFpNeon); + os() << ToCString(kVorr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + os().SetCurrentInstruction(kVorr, kFpNeon); + os() << ToCString(kVorr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << operand; +} + +void Disassembler::vpadal(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVpadal, kFpNeon); + os() << ToCString(kVpadal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpadal(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVpadal, kFpNeon); + os() << ToCString(kVpadal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVpadd, kFpNeon); + os() << ToCString(kVpadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vpaddl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVpaddl, kFpNeon); + os() << ToCString(kVpaddl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpaddl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVpaddl, kFpNeon); + os() << ToCString(kVpaddl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vpmax( + Condition cond, DataType dt, DRegister rd, 
DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVpmax, kFpNeon); + os() << ToCString(kVpmax) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVpmin, kFpNeon); + os() << ToCString(kVpmin) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vpop(Condition cond, DataType dt, DRegisterList dreglist) { + os().SetCurrentInstruction(kVpop, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpop) << ConditionPrinter(it_block_, cond) << dt << " " + << dreglist; +} + +void Disassembler::vpop(Condition cond, DataType dt, SRegisterList sreglist) { + os().SetCurrentInstruction(kVpop, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpop) << ConditionPrinter(it_block_, cond) << dt << " " + << sreglist; +} + +void Disassembler::vpush(Condition cond, DataType dt, DRegisterList dreglist) { + os().SetCurrentInstruction(kVpush, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpush) << ConditionPrinter(it_block_, cond) << dt << " " + << dreglist; +} + +void Disassembler::vpush(Condition cond, DataType dt, SRegisterList sreglist) { + os().SetCurrentInstruction(kVpush, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVpush) << ConditionPrinter(it_block_, cond) << dt << " " + << sreglist; +} + +void Disassembler::vqabs(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVqabs, kFpNeon); + os() << ToCString(kVqabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqabs(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + 
os().SetCurrentInstruction(kVqabs, kFpNeon); + os() << ToCString(kVqabs) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqadd, kFpNeon); + os() << ToCString(kVqadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqadd, kFpNeon); + os() << ToCString(kVqadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmlal, kFpNeon); + os() << ToCString(kVqdmlal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVqdmlal, kFpNeon); + os() << ToCString(kVqdmlal) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmlsl, kFpNeon); + os() << ToCString(kVqdmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + os().SetCurrentInstruction(kVqdmlsl, kFpNeon); + os() << ToCString(kVqdmlsl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << 
IndexedRegisterPrinter(dm, index); +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqdmulh, kFpNeon); + os() << ToCString(kVqdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqdmull, kFpNeon); + os() << ToCString(kVqdmull) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqdmull, kFpNeon); + os() << ToCString(kVqdmull) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void 
Disassembler::vqmovn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVqmovn, kFpNeon); + os() << ToCString(kVqmovn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqmovun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVqmovun, kFpNeon); + os() << ToCString(kVqmovun) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqneg(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVqneg, kFpNeon); + os() << ToCString(kVqneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqneg(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVqneg, kFpNeon); + os() << ToCString(kVqneg) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + 
os() << rn << ", " << rm; +} + +void Disassembler::vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + os().SetCurrentInstruction(kVqrdmulh, kFpNeon); + os() << ToCString(kVqrdmulh) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + os().SetCurrentInstruction(kVqrshl, kFpNeon); + os() << ToCString(kVqrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + os().SetCurrentInstruction(kVqrshl, kFpNeon); + os() << ToCString(kVqrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqrshrn, kFpNeon); + os() << ToCString(kVqrshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqrshrun, kFpNeon); + os() << ToCString(kVqrshrun) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVqshl, kFpNeon); + os() << ToCString(kVqshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << 
rm << ", " << operand; +} + +void Disassembler::vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqshl, kFpNeon); + os() << ToCString(kVqshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVqshlu, kFpNeon); + os() << ToCString(kVqshlu) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqshlu, kFpNeon); + os() << ToCString(kVqshlu) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqshrn, kFpNeon); + os() << ToCString(kVqshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVqshrun, kFpNeon); + os() << ToCString(kVqshrun) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVqsub, kFpNeon); + os() << ToCString(kVqsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd 
<< ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVqsub, kFpNeon); + os() << ToCString(kVqsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVraddhn, kFpNeon); + os() << ToCString(kVraddhn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vrecpe(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrecpe, kFpNeon); + os() << ToCString(kVrecpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrecpe(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrecpe, kFpNeon); + os() << ToCString(kVrecpe) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVrecps, kFpNeon); + os() << ToCString(kVrecps) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrecps, kFpNeon); + os() << ToCString(kVrecps) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrev16(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrev16, kFpNeon); + os() << 
ToCString(kVrev16) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev16(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrev16, kFpNeon); + os() << ToCString(kVrev16) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev32(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrev32, kFpNeon); + os() << ToCString(kVrev32) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev32(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrev32, kFpNeon); + os() << ToCString(kVrev32) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev64(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrev64, kFpNeon); + os() << ToCString(kVrev64) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrev64(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrev64, kFpNeon); + os() << ToCString(kVrev64) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVrhadd, kFpNeon); + os() << ToCString(kVrhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrhadd, kFpNeon); + os() << ToCString(kVrhadd) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn 
<< ", " << rm; +} + +void Disassembler::vrinta(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrinta, kFpNeon); + os() << ToCString(kVrinta) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrinta(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrinta, kFpNeon); + os() << ToCString(kVrinta) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrinta(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrinta, kFpNeon); + os() << ToCString(kVrinta) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintm(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrintm, kFpNeon); + os() << ToCString(kVrintm) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintm(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintm, kFpNeon); + os() << ToCString(kVrintm) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintm(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrintm, kFpNeon); + os() << ToCString(kVrintm) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintn(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrintn, kFpNeon); + os() << ToCString(kVrintn) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintn(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintn, kFpNeon); + os() << ToCString(kVrintn) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintn(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrintn, kFpNeon); + os() << ToCString(kVrintn) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintp(DataType dt, DRegister rd, DRegister rm) { + os().SetCurrentInstruction(kVrintp, kFpNeon); + os() << ToCString(kVrintp) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintp(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintp, kFpNeon); + 
os() << ToCString(kVrintp) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintp(DataType dt, SRegister rd, SRegister rm) { + os().SetCurrentInstruction(kVrintp, kFpNeon); + os() << ToCString(kVrintp) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintr(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVrintr, kFpNeon); + os() << ToCString(kVrintr) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrintr, kFpNeon); + os() << ToCString(kVrintr) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintx(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrintx, kFpNeon); + os() << ToCString(kVrintx) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintx(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintx, kFpNeon); + os() << ToCString(kVrintx) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintx(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVrintx, kFpNeon); + os() << ToCString(kVrintx) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintz(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrintz, kFpNeon); + os() << ToCString(kVrintz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrintz(DataType dt, QRegister rd, QRegister rm) { + os().SetCurrentInstruction(kVrintz, kFpNeon); + os() << ToCString(kVrintz) << dt << " " << rd << ", " << rm; +} + +void Disassembler::vrintz(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVrintz, kFpNeon); 
+ os() << ToCString(kVrintz) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + os().SetCurrentInstruction(kVrshl, kFpNeon); + os() << ToCString(kVrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + os().SetCurrentInstruction(kVrshl, kFpNeon); + os() << ToCString(kVrshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << rn; +} + +void Disassembler::vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVrshr, kFpNeon); + os() << ToCString(kVrshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVrshr, kFpNeon); + os() << ToCString(kVrshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVrshrn, kFpNeon); + os() << ToCString(kVrshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vrsqrte(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVrsqrte, kFpNeon); + os() << ToCString(kVrsqrte) << 
ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrsqrte(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVrsqrte, kFpNeon); + os() << ToCString(kVrsqrte) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVrsqrts, kFpNeon); + os() << ToCString(kVrsqrts) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrsqrts, kFpNeon); + os() << ToCString(kVrsqrts) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVrsra, kFpNeon); + os() << ToCString(kVrsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVrsra, kFpNeon); + os() << ToCString(kVrsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVrsubhn, kFpNeon); + os() << ToCString(kVrsubhn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", 
" << rm; +} + +void Disassembler::vseleq(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVseleq, kFpNeon); + os() << ToCString(kVseleq) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vseleq(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVseleq, kFpNeon); + os() << ToCString(kVseleq) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselge(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVselge, kFpNeon); + os() << ToCString(kVselge) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselge(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVselge, kFpNeon); + os() << ToCString(kVselge) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselgt(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVselgt, kFpNeon); + os() << ToCString(kVselgt) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselgt(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVselgt, kFpNeon); + os() << ToCString(kVselgt) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselvs(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm) { + os().SetCurrentInstruction(kVselvs, kFpNeon); + os() << ToCString(kVselvs) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vselvs(DataType dt, + SRegister rd, + SRegister rn, + SRegister rm) { + os().SetCurrentInstruction(kVselvs, kFpNeon); + os() << ToCString(kVselvs) << dt << " " << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVshl, kFpNeon); + os() << ToCString(kVshl) << ConditionPrinter(it_block_, 
cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVshl, kFpNeon); + os() << ToCString(kVshl) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVshll, kFpNeon); + os() << ToCString(kVshll) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVshr, kFpNeon); + os() << ToCString(kVshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVshr, kFpNeon); + os() << ToCString(kVshr) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVshrn, kFpNeon); + os() << ToCString(kVshrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm << ", " << operand; +} + +void Disassembler::vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVsli, kFpNeon); + os() << ToCString(kVsli) << 
ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVsli, kFpNeon); + os() << ToCString(kVsli) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsqrt(Condition cond, + DataType dt, + SRegister rd, + SRegister rm) { + os().SetCurrentInstruction(kVsqrt, kFpNeon); + os() << ToCString(kVsqrt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vsqrt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVsqrt, kFpNeon); + os() << ToCString(kVsqrt) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVsra, kFpNeon); + os() << ToCString(kVsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVsra, kFpNeon); + os() << ToCString(kVsra) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + os().SetCurrentInstruction(kVsri, kFpNeon); + os() << ToCString(kVsri) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || 
!use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + os().SetCurrentInstruction(kVsri, kFpNeon); + os() << ToCString(kVsri) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rm) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rm << ", " << operand; +} + +void Disassembler::vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst1, kFpNeon); + os() << ToCString(kVst1) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst1Location, operand); +} + +void Disassembler::vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst2, kFpNeon); + os() << ToCString(kVst2) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst2Location, operand); +} + +void Disassembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst3, kFpNeon); + os() << ToCString(kVst3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst3Location, operand); +} + +void Disassembler::vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + os().SetCurrentInstruction(kVst3, kFpNeon); + os() << ToCString(kVst3) << ConditionPrinter(it_block_, cond) << dt << " " + << nreglist << ", " << PrintMemOperand(kVst3Location, operand); +} + +void Disassembler::vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + os().SetCurrentInstruction(kVst4, kFpNeon); + os() << ToCString(kVst4) << ConditionPrinter(it_block_, cond) << 
dt << " " + << nreglist << ", " << PrintAlignedMemOperand(kVst4Location, operand); +} + +void Disassembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVstm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVstm, kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstm) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVstmdb, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVstmdb, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmdb) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + os().SetCurrentInstruction(kVstmia, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmia) << ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << dreglist; +} + +void Disassembler::vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + os().SetCurrentInstruction(kVstmia, + kLoadStore | kLoadStoreMultiple | kFpNeon); + os() << ToCString(kVstmia) << 
ConditionPrinter(it_block_, cond) << dt << " " + << rn << write_back << ", " << sreglist; +} + +void Disassembler::vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVstr, kFpNeon); + os() << ToCString(kVstr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped64) << " " << rd << ", " + << PrintMemOperand(kStoreDoublePrecisionLocation, operand); +} + +void Disassembler::vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + os().SetCurrentInstruction(kVstr, kFpNeon); + os() << ToCString(kVstr) << ConditionPrinter(it_block_, cond) + << DtPrinter(dt, Untyped32) << " " << rd << ", " + << PrintMemOperand(kStoreSinglePrecisionLocation, operand); +} + +void Disassembler::vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVsub, kFpNeon); + os() << ToCString(kVsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVsub, kFpNeon); + os() << ToCString(kVsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + os().SetCurrentInstruction(kVsub, kFpNeon); + os() << ToCString(kVsub) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVsubhn, kFpNeon); + os() << ToCString(kVsubhn) << ConditionPrinter(it_block_, cond) << dt << " " + 
<< rd << ", " << rn << ", " << rm; +} + +void Disassembler::vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVsubl, kFpNeon); + os() << ToCString(kVsubl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rn << ", " << rm; +} + +void Disassembler::vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVsubw, kFpNeon); + os() << ToCString(kVsubw) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vswp(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVswp, kFpNeon); + os() << ToCString(kVswp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vswp(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVswp, kFpNeon); + os() << ToCString(kVswp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + os().SetCurrentInstruction(kVtbl, kFpNeon); + os() << ToCString(kVtbl) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << nreglist << ", " << rm; +} + +void Disassembler::vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + os().SetCurrentInstruction(kVtbx, kFpNeon); + os() << ToCString(kVtbx) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << nreglist << ", " << rm; +} + +void Disassembler::vtrn(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVtrn, kFpNeon); + os() << ToCString(kVtrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void 
Disassembler::vtrn(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVtrn, kFpNeon); + os() << ToCString(kVtrn) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + os().SetCurrentInstruction(kVtst, kFpNeon); + os() << ToCString(kVtst) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + os().SetCurrentInstruction(kVtst, kFpNeon); + os() << ToCString(kVtst) << ConditionPrinter(it_block_, cond) << dt; + os() << " "; + if (!rd.Is(rn) || !use_short_hand_form_) { + os() << rd << ", "; + } + os() << rn << ", " << rm; +} + +void Disassembler::vuzp(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVuzp, kFpNeon); + os() << ToCString(kVuzp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vuzp(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVuzp, kFpNeon); + os() << ToCString(kVuzp) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vzip(Condition cond, + DataType dt, + DRegister rd, + DRegister rm) { + os().SetCurrentInstruction(kVzip, kFpNeon); + os() << ToCString(kVzip) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::vzip(Condition cond, + DataType dt, + QRegister rd, + QRegister rm) { + os().SetCurrentInstruction(kVzip, kFpNeon); + os() << ToCString(kVzip) << ConditionPrinter(it_block_, cond) << dt << " " + << rd << ", " << rm; +} + +void Disassembler::yield(Condition cond, EncodingSize size) { + os().SetCurrentInstruction(kYield, kNoAttribute); + os() << 
ToCString(kYield) << ConditionPrinter(it_block_, cond) << size; +} + +int Disassembler::T32Size(uint32_t instr) { + if ((instr & 0xe0000000) == 0xe0000000) { + switch (instr & 0x08000000) { + case 0x00000000: + if ((instr & 0x10000000) == 0x10000000) return 4; + return 2; + case 0x08000000: + return 4; + default: + return 2; + } + } + return 2; +} + +void Disassembler::DecodeT32(uint32_t instr) { + T32CodeAddressIncrementer incrementer(instr, &code_address_); + ITBlockScope it_scope(&it_block_); + + switch (instr & 0xe0000000) { + case 0x00000000: { + // 0x00000000 + switch (instr & 0x18000000) { + case 0x18000000: { + // 0x18000000 + switch (instr & 0x06000000) { + case 0x00000000: { + // 0x18000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + unsigned rm = (instr >> 22) & 0x7; + if (InITBlock()) { + // ADD{} , , ; T1 + add(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ADDS{} {}, , ; T1 + adds(Condition::None(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } + break; + } + case 0x02000000: { + // 0x1a000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + unsigned rm = (instr >> 22) & 0x7; + if (InITBlock()) { + // SUB{} , , ; T1 + sub(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // SUBS{} {}, , ; T1 + subs(Condition::None(), + Narrow, + Register(rd), + Register(rn), + Register(rm)); + } + break; + } + case 0x04000000: { + // 0x1c000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + uint32_t imm = (instr >> 22) & 0x7; + if (InITBlock()) { + // ADD{} , , # ; T1 + add(CurrentCond(), Narrow, Register(rd), Register(rn), imm); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ADDS{} , , # ; T1 + adds(Condition::None(), + Narrow, + Register(rd), + Register(rn), + imm); + } + break; + } + case 0x06000000: { + // 
0x1e000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + uint32_t imm = (instr >> 22) & 0x7; + if (InITBlock()) { + // SUB{} , , # ; T1 + sub(CurrentCond(), Narrow, Register(rd), Register(rn), imm); + } else { + VIXL_ASSERT(OutsideITBlock()); + // SUBS{} , , # ; T1 + subs(Condition::None(), + Narrow, + Register(rd), + Register(rn), + imm); + } + break; + } + } + break; + } + default: { + if (((instr & 0x18000000) == 0x18000000)) { + UnallocatedT32(instr); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x2)) && + InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // ASR{} {}, , # ; T2 + asr(CurrentCond(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x2)) && + !InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // ASRS{} {}, , # ; T2 + asrs(Condition::None(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x0)) && + ((instr & 0x07c00000) != 0x00000000) && InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + // LSL{} {}, , # ; T2 + lsl(CurrentCond(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x0)) && + ((instr & 0x07c00000) != 0x00000000) && !InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + // LSLS{} {}, , # ; T2 + lsls(Condition::None(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x1)) && + InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = 
(instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // LSR{} {}, , # ; T2 + lsr(CurrentCond(), Narrow, Register(rd), Register(rm), amount); + return; + } + if (((Uint32((instr >> 27)) & Uint32(0x3)) == Uint32(0x1)) && + !InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + uint32_t amount = (instr >> 22) & 0x1f; + if (amount == 0) amount = 32; + // LSRS{} {}, , # ; T2 + lsrs(Condition::None(), Narrow, Register(rd), Register(rm), amount); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + ImmediateShiftOperand shift_operand((instr >> 27) & 0x3, + (instr >> 22) & 0x1f); + if (InITBlock()) { + // MOV{} , {, # } ; T2 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{} , {, # } ; T2 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + } + break; + } + } + break; + } + case 0x20000000: { + // 0x20000000 + switch (instr & 0x18000000) { + case 0x00000000: { + // 0x20000000 + unsigned rd = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + if (InITBlock()) { + // MOV{} , # ; T1 + mov(CurrentCond(), Narrow, Register(rd), imm); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{} , # ; T1 + movs(Condition::None(), Narrow, Register(rd), imm); + } + break; + } + case 0x08000000: { + // 0x28000000 + unsigned rn = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + // CMP{}{} , # ; T1 + cmp(CurrentCond(), Narrow, Register(rn), imm); + break; + } + case 0x10000000: { + // 0x30000000 + unsigned rd = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + if (InITBlock() && ((imm <= 7))) { + // ADD{} , # ; T2 + add(CurrentCond(), Register(rd), imm); + } else if (InITBlock() && ((imm > 7))) { + // ADD{} {}, , # ; T2 + 
add(CurrentCond(), Narrow, Register(rd), Register(rd), imm); + } else if (OutsideITBlock() && ((imm <= 7))) { + // ADDS{} , # ; T2 + adds(Register(rd), imm); + } else if (OutsideITBlock() && ((imm > 7))) { + // ADDS{} {}, , # ; T2 + adds(Condition::None(), Narrow, Register(rd), Register(rd), imm); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x18000000: { + // 0x38000000 + unsigned rd = (instr >> 24) & 0x7; + uint32_t imm = (instr >> 16) & 0xff; + if (InITBlock() && ((imm <= 7))) { + // SUB{} , # ; T2 + sub(CurrentCond(), Register(rd), imm); + } else if (InITBlock() && ((imm > 7))) { + // SUB{} {}, , # ; T2 + sub(CurrentCond(), Narrow, Register(rd), Register(rd), imm); + } else if (OutsideITBlock() && ((imm <= 7))) { + // SUBS{} , # ; T2 + subs(Register(rd), imm); + } else if (OutsideITBlock() && ((imm > 7))) { + // SUBS{} {}, , # ; T2 + subs(Condition::None(), Narrow, Register(rd), Register(rd), imm); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + case 0x40000000: { + // 0x40000000 + switch (instr & 0x18000000) { + case 0x00000000: { + // 0x40000000 + switch (instr & 0x07000000) { + case 0x00000000: { + // 0x40000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x40000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // AND{} {}, , ; T1 + and_(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ANDS{} {}, , ; T1 + ands(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00400000: { + // 0x40400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // EOR{} {}, , ; T1 + eor(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // EORS{} {}, , ; T1 + eors(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + 
} + break; + } + case 0x00800000: { + // 0x40800000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSL{} {}, , ; T1 + lsl(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSLS{} {}, , ; T1 + lsls(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{} , , LSL ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), LSL, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{} , , LSL ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), LSL, Register(rs))); + } + break; + } + case 0x00c00000: { + // 0x40c00000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSR{} {}, , ; T1 + lsr(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // LSRS{} {}, , ; T1 + lsrs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{} , , LSR ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), LSR, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{} , , LSR ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), LSR, Register(rs))); + } + break; + } + } + break; + } + case 0x01000000: { + // 0x41000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x41000000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + 
unsigned rs = (instr >> 19) & 0x7; + // ASR{} {}, , ; T1 + asr(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // ASRS{} {}, , ; T1 + asrs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{} , , ASR ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), ASR, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{} , , ASR ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), ASR, Register(rs))); + } + break; + } + case 0x00400000: { + // 0x41400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // ADC{} {}, , ; T1 + adc(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ADCS{} {}, , ; T1 + adcs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00800000: { + // 0x41800000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // SBC{} {}, , ; T1 + sbc(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // SBCS{} {}, , ; T1 + sbcs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00c00000: { + // 0x41c00000 + if (InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // ROR{} {}, , ; T1 + ror(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + if (!InITBlock()) { + unsigned rd = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + // RORS{} {}, , ; T1 + rors(Condition::None(), 
+ Narrow, + Register(rd), + Register(rd), + Register(rs)); + return; + } + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 16) & 0x7; + unsigned rs = (instr >> 19) & 0x7; + if (InITBlock()) { + // MOV{} , , ROR ; T1 + mov(CurrentCond(), + Narrow, + Register(rd), + Operand(Register(rm), ROR, Register(rs))); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MOVS{} , , ROR ; T1 + movs(Condition::None(), + Narrow, + Register(rd), + Operand(Register(rm), ROR, Register(rs))); + } + break; + } + } + break; + } + case 0x02000000: { + // 0x42000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x42000000 + unsigned rn = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + // TST{}{} , ; T1 + tst(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + case 0x00400000: { + // 0x42400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + if (InITBlock()) { + // RSB{} {}, , #0 ; T1 + rsb(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + UINT32_C(0)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // RSBS{} {}, , #0 ; T1 + rsbs(Condition::None(), + Narrow, + Register(rd), + Register(rn), + UINT32_C(0)); + } + break; + } + case 0x00800000: { + // 0x42800000 + unsigned rn = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + // CMP{}{} , ; T1 + cmp(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + case 0x00c00000: { + // 0x42c00000 + unsigned rn = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + // CMN{}{} , ; T1 + cmn(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + } + break; + } + case 0x03000000: { + // 0x43000000 + switch (instr & 0x00c00000) { + case 0x00000000: { + // 0x43000000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // ORR{} {}, , ; T1 + orr(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // ORRS{} {}, , ; T1 + 
orrs(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00400000: { + // 0x43400000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rn = (instr >> 19) & 0x7; + if (InITBlock()) { + // MUL{} , , {} ; T1 + mul(CurrentCond(), + Narrow, + Register(rd), + Register(rn), + Register(rd)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MULS{} , , {} ; T1 + muls(Condition::None(), + Register(rd), + Register(rn), + Register(rd)); + } + break; + } + case 0x00800000: { + // 0x43800000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // BIC{} {}, , ; T1 + bic(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // BICS{} {}, , ; T1 + bics(Condition::None(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + case 0x00c00000: { + // 0x43c00000 + unsigned rd = (instr >> 16) & 0x7; + unsigned rm = (instr >> 19) & 0x7; + if (InITBlock()) { + // MVN{} , ; T1 + mvn(CurrentCond(), Narrow, Register(rd), Register(rm)); + } else { + VIXL_ASSERT(OutsideITBlock()); + // MVNS{} , ; T1 + mvns(Condition::None(), Narrow, Register(rd), Register(rm)); + } + break; + } + } + break; + } + case 0x04000000: { + // 0x44000000 + switch (instr & 0x00780000) { + case 0x00680000: { + // 0x44680000 + unsigned rd = ((instr >> 16) & 0x7) | ((instr >> 20) & 0x8); + // ADD{}{} {}, SP, ; T1 + add(CurrentCond(), Narrow, Register(rd), sp, Register(rd)); + break; + } + default: { + switch (instr & 0x00870000) { + case 0x00850000: { + // 0x44850000 + if (((instr & 0x780000) == 0x680000)) { + UnallocatedT32(instr); + return; + } + unsigned rm = (instr >> 19) & 0xf; + // ADD{}{} {SP}, SP, ; T2 + add(CurrentCond(), Narrow, sp, sp, Register(rm)); + break; + } + default: { + if (((instr & 0x780000) == 0x680000) || + ((instr & 0x870000) == 0x850000)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ((instr >> 16) & 
0x7) | ((instr >> 20) & 0x8); + unsigned rm = (instr >> 19) & 0xf; + if (InITBlock()) { + // ADD{} , ; T2 + add(CurrentCond(), Register(rd), Register(rm)); + } else { + // ADD{}{} {}, , ; T2 + add(CurrentCond(), + Narrow, + Register(rd), + Register(rd), + Register(rm)); + } + break; + } + } + break; + } + } + break; + } + case 0x05000000: { + // 0x45000000 + unsigned rn = ((instr >> 16) & 0x7) | ((instr >> 20) & 0x8); + unsigned rm = (instr >> 19) & 0xf; + // CMP{}{} , ; T2 + cmp(CurrentCond(), Narrow, Register(rn), Register(rm)); + break; + } + case 0x06000000: { + // 0x46000000 + unsigned rd = ((instr >> 16) & 0x7) | ((instr >> 20) & 0x8); + unsigned rm = (instr >> 19) & 0xf; + // MOV{}{} , ; T1 + mov(CurrentCond(), Narrow, Register(rd), Register(rm)); + break; + } + case 0x07000000: { + // 0x47000000 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0x47000000 + unsigned rm = (instr >> 19) & 0xf; + // BX{}{} ; T1 + bx(CurrentCond(), Register(rm)); + if (((instr & 0xff870000) != 0x47000000)) { + UnpredictableT32(instr); + } + break; + } + case 0x00800000: { + // 0x47800000 + unsigned rm = (instr >> 19) & 0xf; + // BLX{}{} ; T1 + blx(CurrentCond(), Register(rm)); + if (((instr & 0xff870000) != 0x47800000)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x08000000: { + // 0x48000000 + unsigned rt = (instr >> 24) & 0x7; + int32_t imm = ((instr >> 16) & 0xff) << 2; + Location location(imm, kT32PcDelta); + // LDR{}{} ,
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000100: { + // 0xf900010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000200: { + // 0xf900020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000300: { + // 0xf900030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000400: { + // 0xf900040d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000500: { + // 0xf900050d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000600: { + // 0xf900060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000700: { + // 0xf900070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000800: { + // 0xf900080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000900: { + // 0xf900090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf900000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf900000d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf900010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf900020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf900030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf900040d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf900050d + if (((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf900060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf900070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf900080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf900090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf9000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf9000000 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf9000100 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf9000200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf9000300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf9000400 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf9000500 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf9000600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf9000700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf9000800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf9000900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9000a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01200000: { + // 0xf9200000 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf920000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf920000d + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf920000d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000100: { + // 0xf920010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000200: { + // 0xf920020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000300: { + // 0xf920030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000400: { + // 0xf920040d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000500: { + // 0xf920050d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000600: { + // 0xf920060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000700: { + // 0xf920070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000800: { + // 0xf920080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000900: { + // 0xf920090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf920000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf920000d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf920010d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf920020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf920030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf920040d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf920050d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf920060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf920070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf920080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf920090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf9200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf9200000 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf9200100 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf9200200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf9200300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf9200400 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf9200500 + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf9200600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf9200700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf9200800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf9200900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf9200a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01800000: { + // 0xf9800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf9800000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9800c00 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf980000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf9800100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9800d00 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf980010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf9800200 + switch (instr & 0x00000c30) { + case 0x00000010: { + // 0xf9800210 + UnallocatedT32(instr); + break; + } + case 0x00000030: { + // 0xf9800230 + UnallocatedT32(instr); + break; + } + case 0x00000410: { + // 0xf9800610 + UnallocatedT32(instr); + break; + } + case 0x00000430: { + // 0xf9800630 + UnallocatedT32(instr); + break; + } + case 0x00000810: { + // 0xf9800a10 + UnallocatedT32(instr); + break; + } + case 0x00000820: { + // 0xf9800a20 + UnallocatedT32(instr); + break; + } + case 0x00000830: { + // 0xf9800a30 + UnallocatedT32(instr); + break; + } + case 0x00000c00: { + // 0xf9800e00 + UnallocatedT32(instr); + break; + } + case 0x00000c10: { + // 0xf9800e10 + UnallocatedT32(instr); + break; + } + case 0x00000c20: { + // 0xf9800e20 + UnallocatedT32(instr); + break; + } + case 0x00000c30: { + // 0xf9800e30 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980020d + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 
1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, []! ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf980020f + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [] ; T1 + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VST3{}{}.
, [], # ; T1 NOLINT(whitespace/line_length) + vst3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf9800300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9800f00 + UnallocatedT32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf980030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf980030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf980030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vst4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x01a00000: { + // 0xf9a00000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf9a00000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00c00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00c0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00c0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00c0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld1(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf9a00100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00d00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00d0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00d0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00d0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, + dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld2(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf9a00200 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00e00 + switch (instr & 0x00000010) { + case 0x00000000: { + // 0xf9a00e00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00e0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00e0d + DataType dt = Dt_size_7_Decode( + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister( + first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00e0f + DataType dt = Dt_size_7_Decode( + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister( + first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0020d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0020f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; T1 + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; T1 NOLINT(whitespace/line_length) + vld3(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf9a00300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf9a00f00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a00f0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a00f0d + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a00f0f + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedT32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf9a0030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf9a0030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf9a0030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> + 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedT32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = + decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; T1 NOLINT(whitespace/line_length) + vld4(CurrentCond(), + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x10100000: { + // 0xf8100000 + switch (instr & 0x01400000) { + case 0x00000000: { + // 0xf8100000 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xf81f0000 + switch (instr & 0x0000f000) { + case 0x0000f000: { + // 0xf81ff000 + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xfff; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kT32PcDelta); + // PLD{}{}
, [{, #{+/-}}] ; T1 + vstr(CurrentCond(), + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + case 0x00200000: { + // 0xed200a00 + if ((instr & 0x00800000) == 0x00000000) { + if (((Uint32((instr >> 16)) & Uint32(0xf)) == + Uint32(0xd))) { + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VPUSH{}{}{.} ; T2 + vpush(CurrentCond(), + kDataTypeValueNone, + SRegisterList(SRegister(first), len)); + if ((len == 0) || + ((first + len) > kNumberOfSRegisters)) { + UnpredictableT32(instr); + } + return; + } + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VSTMDB{}{}{.} !, ; T2 + vstmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || + ((first + len) > kNumberOfSRegisters)) { + UnpredictableT32(instr); + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00200100: { + // 0xed200b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0xed200b00 + if (((Uint32((instr >> 16)) & Uint32(0xf)) == + Uint32(0xd))) { + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VPUSH{}{}{.} ; T1 + vpush(CurrentCond(), + kDataTypeValueNone, + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableT32(instr); + } + return; + } + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VSTMDB{}{}{.} !, ; T1 + vstmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableT32(instr); + } + break; + } + case 
0x00000001: { + // 0xed200b01 + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FSTMDBX{}{} !, ; T1 + fstmdbx(CurrentCond(), + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01000e00: { + // 0xed000e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0xed005e00 + UnimplementedT32_32("STC", instr); + break; + } + case 0x00205000: { + // 0xed205e00 + UnimplementedT32_32("STC", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x01100a00: { + // 0xed100a00 + switch (instr & 0x00200100) { + case 0x00000000: { + // 0xed100a00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xed1f0a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xff; + imm <<= 2; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kT32PcDelta); + // VLDR{}{}{.32} ,
,
, [{, #{+/-}}] ; T1 NOLINT(whitespace/line_length) + vldr(CurrentCond(), + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + } + break; + } + case 0x00200000: { + // 0xed300a00 + if ((instr & 0x00800000) == 0x00000000) { + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VLDMDB{}{}{.} !, ; T2 + vldmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || + ((first + len) > kNumberOfSRegisters)) { + UnpredictableT32(instr); + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00200100: { + // 0xed300b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0xed300b00 + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VLDMDB{}{}{.} !, ; T1 + vldmdb(CurrentCond(), + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableT32(instr); + } + break; + } + case 0x00000001: { + // 0xed300b01 + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FLDMDBX{}{} !, ; T1 + fldmdbx(CurrentCond(), + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x01100e00: { + // 0xed100e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0xed105e00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xed1f5e00 + UnimplementedT32_32("LDC", instr); + break; + } + default: { 
+ if (((instr & 0xf0000) == 0xf0000)) { + UnallocatedT32(instr); + return; + } + UnimplementedT32_32("LDC", instr); + break; + } + } + break; + } + case 0x00205000: { + // 0xed305e00 + if (((instr & 0xf0000) == 0xf0000)) { + UnallocatedT32(instr); + return; + } + UnimplementedT32_32("LDC", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x06000000: { + // 0xee000000 + switch (instr & 0x01000010) { + case 0x00000000: { + // 0xee000000 + switch (instr & 0x10b00f40) { + case 0x00000a00: { + // 0xee000a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; T2 + vmla(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000a40: { + // 0xee000a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; T2 + vmls(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000b00: { + // 0xee000b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.F64
, , ; T2 + vmla(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000b40: { + // 0xee000b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.F64
, , ; T2 + vmls(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00100a00: { + // 0xee100a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLS{}{}.F32 , , ; T1 + vnmls(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00100a40: { + // 0xee100a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLA{}{}.F32 , , ; T1 + vnmla(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00100b00: { + // 0xee100b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLS{}{}.F64
, , ; T1 + vnmls(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00100b40: { + // 0xee100b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLA{}{}.F64
, , ; T1 + vnmla(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200a00: { + // 0xee200a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMUL{}{}.F32 {}, , ; T2 + vmul(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200a40: { + // 0xee200a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMUL{}{}.F32 {}, , ; T1 + vnmul(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200b00: { + // 0xee200b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.F64 {
}, , ; T2 + vmul(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200b40: { + // 0xee200b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMUL{}{}.F64 {
}, , ; T1 + vnmul(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00300a00: { + // 0xee300a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VADD{}{}.F32 {}, , ; T2 + vadd(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00300a40: { + // 0xee300a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSUB{}{}.F32 {}, , ; T2 + vsub(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00300b00: { + // 0xee300b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.F64 {
}, , ; T2 + vadd(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00300b40: { + // 0xee300b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.F64 {
}, , ; T2 + vsub(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800a00: { + // 0xee800a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VDIV{}{}.F32 {}, , ; T1 + vdiv(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800b00: { + // 0xee800b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDIV{}{}.F64 {
}, , ; T1 + vdiv(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00900a00: { + // 0xee900a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMS{}{}.F32 , , ; T1 + vfnms(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00900a40: { + // 0xee900a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMA{}{}.F32 , , ; T1 + vfnma(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00900b00: { + // 0xee900b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFNMS{}{}.F64
, , ; T1 + vfnms(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00900b40: { + // 0xee900b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFNMA{}{}.F64
, , ; T1 + vfnma(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00a00: { + // 0xeea00a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; T2 + vfma(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00a40: { + // 0xeea00a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; T2 + vfms(CurrentCond(), + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00b00: { + // 0xeea00b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMA{}{}.F64
, , ; T2 + vfma(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00b40: { + // 0xeea00b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMS{}{}.F64
, , ; T2 + vfms(CurrentCond(), + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00b00a00: { + // 0xeeb00a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F32 , # ; T2 + vmov(CurrentCond(), F32, SRegister(rd), imm); + if (((instr & 0xffb00ff0) != 0xeeb00a00)) { + UnpredictableT32(instr); + } + break; + } + case 0x00b00a40: { + // 0xeeb00a40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0xeeb00a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb00a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMOV{}{}.F32 , ; T2 + vmov(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb00ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VABS{}{}.F32 , ; T2 + vabs(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb10a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNEG{}{}.F32 , ; T2 + vneg(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb10ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSQRT{}{}.F32 , ; T1 + vsqrt(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0xeeb20a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb20a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F32.F16 , ; T1 + vcvtb(CurrentCond(), + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb20ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = 
ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F32.F16 , ; T1 + vcvtt(CurrentCond(), + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb30a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F16.F32 , ; T1 + vcvtb(CurrentCond(), + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb30ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F16.F32 , ; T1 + vcvtt(CurrentCond(), + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0xeeb40a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb40a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMP{}{}.F32 , ; T1 + vcmp(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb40ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMPE{}{}.F32 , ; T1 + vcmpe(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb50a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMP{}{}.F32 , #0.0 ; T2 + vcmp(CurrentCond(), F32, SRegister(rd), 0.0); + if (((instr & 0xffbf0fff) != 0xeeb50a40)) { + UnpredictableT32(instr); + } + break; + } + case 0x00010080: { + // 0xeeb50ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMPE{}{}.F32 , #0.0 ; T2 + vcmpe(CurrentCond(), F32, SRegister(rd), 0.0); + if (((instr & 0xffbf0fff) != 0xeeb50ac0)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0xeeb60a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb60a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTR{}{}.F32 , ; T1 + vrintr(CurrentCond(), + F32, + 
SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb60ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTZ{}{}.F32 , ; T1 + vrintz(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb70a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTX{}{}.F32 , ; T1 + vrintx(CurrentCond(), + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb70ac0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F64.F32
, ; T1 + vcvt(CurrentCond(), + F64, + F32, + DRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00080000: { + // 0xeeb80a40 + if ((instr & 0x00010000) == 0x00000000) { + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F32.
, ; T1 + vcvt(CurrentCond(), + F32, + dt, + SRegister(rd), + SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000a0000: { + // 0xeeba0a40 + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.F32.
, , # ; T1 + vcvt(CurrentCond(), + F32, + dt, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0xeebc0a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeebc0a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.U32.F32 , ; T1 + vcvtr(CurrentCond(), + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeebc0ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.U32.F32 , ; T1 + vcvt(CurrentCond(), + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeebd0a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.S32.F32 , ; T1 + vcvtr(CurrentCond(), + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0xeebd0ac0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.S32.F32 , ; T1 + vcvt(CurrentCond(), + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0xeebe0a40 + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
.F32 , , # ; T1 + vcvt(CurrentCond(), + dt, + F32, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + } + break; + } + case 0x00b00b00: { + // 0xeeb00b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F64
, # ; T2 + vmov(CurrentCond(), F64, DRegister(rd), imm); + if (((instr & 0xffb00ff0) != 0xeeb00b00)) { + UnpredictableT32(instr); + } + break; + } + case 0x00b00b40: { + // 0xeeb00b40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0xeeb00b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb00b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMOV{}{}.F64
, ; T2 + vmov(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb00bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABS{}{}.F64
, ; T2 + vabs(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb10b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNEG{}{}.F64
, ; T2 + vneg(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb10bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSQRT{}{}.F64
, ; T1 + vsqrt(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0xeeb20b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb20b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F64.F16
, ; T1 + vcvtb(CurrentCond(), + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb20bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F64.F16
, ; T1 + vcvtt(CurrentCond(), + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb30b40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTB{}{}.F16.F64 , ; T1 + vcvtb(CurrentCond(), + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb30bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTT{}{}.F16.F64 , ; T1 + vcvtt(CurrentCond(), + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0xeeb40b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb40b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMP{}{}.F64
, ; T1 + vcmp(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb40bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMPE{}{}.F64
, ; T1 + vcmpe(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb50b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + // VCMP{}{}.F64
, #0.0 ; T2 + vcmp(CurrentCond(), F64, DRegister(rd), 0.0); + if (((instr & 0xffbf0fff) != 0xeeb50b40)) { + UnpredictableT32(instr); + } + break; + } + case 0x00010080: { + // 0xeeb50bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + // VCMPE{}{}.F64
, #0.0 ; T2 + vcmpe(CurrentCond(), F64, DRegister(rd), 0.0); + if (((instr & 0xffbf0fff) != 0xeeb50bc0)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0xeeb60b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeeb60b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTR{}{}.F64
, ; T1 + vrintr(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xeeb60bc0 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTZ{}{}.F64
, ; T1 + vrintz(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0xeeb70b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTX{}{}.F64
, ; T1 + vrintx(CurrentCond(), + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0xeeb70bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.F32.F64 , ; T1 + vcvt(CurrentCond(), + F32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00080000: { + // 0xeeb80b40 + if ((instr & 0x00010000) == 0x00000000) { + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F64.
, ; T1 + vcvt(CurrentCond(), + F64, + dt, + DRegister(rd), + SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000a0000: { + // 0xeeba0b40 + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.F64.
, , # ; T1 + vcvt(CurrentCond(), + F64, + dt, + DRegister(rd), + DRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0xeebc0b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0xeebc0b40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.U32.F64 , ; T1 + vcvtr(CurrentCond(), + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xeebc0bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.U32.F64 , ; T1 + vcvt(CurrentCond(), + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0xeebd0b40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.S32.F64 , ; T1 + vcvtr(CurrentCond(), + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0xeebd0bc0 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.S32.F64 , ; T1 + vcvt(CurrentCond(), + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0xeebe0b40 + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
.F64 , , # ; T1 + vcvt(CurrentCond(), + dt, + F64, + DRegister(rd), + DRegister(rd), + fbits); + break; + } + } + break; + } + case 0x10000a00: { + // 0xfe000a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELEQ.F32 , , ; T1 + vseleq(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10000b00: { + // 0xfe000b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELEQ.F64
, , ; T1 + vseleq(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10100a00: { + // 0xfe100a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELVS.F32 , , ; T1 + vselvs(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10100b00: { + // 0xfe100b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELVS.F64
, , ; T1 + vselvs(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200a00: { + // 0xfe200a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELGE.F32 , , ; T1 + vselge(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200b00: { + // 0xfe200b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELGE.F64
, , ; T1 + vselge(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10300a00: { + // 0xfe300a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELGT.F32 , , ; T1 + vselgt(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10300b00: { + // 0xfe300b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELGT.F64
, , ; T1 + vselgt(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800a00: { + // 0xfe800a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMAXNM{}.F32 , , ; T2 + vmaxnm(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800a40: { + // 0xfe800a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMINNM{}.F32 , , ; T2 + vminnm(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800b00: { + // 0xfe800b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAXNM{}.F64
, , ; T2 + vmaxnm(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10800b40: { + // 0xfe800b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMINNM{}.F64
, , ; T2 + vminnm(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10b00a40: { + // 0xfeb00a40 + switch (instr & 0x000f0000) { + case 0x00080000: { + // 0xfeb80a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTA{}.F32 , ; T1 + vrinta(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00090000: { + // 0xfeb90a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTN{}.F32 , ; T1 + vrintn(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000a0000: { + // 0xfeba0a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTP{}.F32 , ; T1 + vrintp(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000b0000: { + // 0xfebb0a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTM{}.F32 , ; T1 + vrintm(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000c0000: { + // 0xfebc0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTA{}.
.F32 , ; T1 + vcvta(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000d0000: { + // 0xfebd0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTN{}.
.F32 , ; T1 + vcvtn(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000e0000: { + // 0xfebe0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTP{}.
.F32 , ; T1 + vcvtp(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000f0000: { + // 0xfebf0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTM{}.
.F32 , ; T1 + vcvtm(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x10b00b40: { + // 0xfeb00b40 + switch (instr & 0x000f0000) { + case 0x00080000: { + // 0xfeb80b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTA{}.F64
, ; T1 + vrinta(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00090000: { + // 0xfeb90b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTN{}.F64
, ; T1 + vrintn(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000a0000: { + // 0xfeba0b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTP{}.F64
, ; T1 + vrintp(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000b0000: { + // 0xfebb0b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTM{}.F64
, ; T1 + vrintm(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000c0000: { + // 0xfebc0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTA{}.
.F64 , ; T1 + vcvta(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000d0000: { + // 0xfebd0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTN{}.
.F64 , ; T1 + vcvtn(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000e0000: { + // 0xfebe0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTP{}.
.F64 , ; T1 + vcvtp(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000f0000: { + // 0xfebf0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTM{}.
.F64 , ; T1 + vcvtm(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000010: { + // 0xee000010 + switch (instr & 0x10100e00) { + case 0x00000a00: { + // 0xee000a10 + switch (instr & 0x00800100) { + case 0x00000000: { + // 0xee000a10 + if ((instr & 0x00600000) == 0x00000000) { + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{} , ; T1 + vmov(CurrentCond(), SRegister(rn), Register(rt)); + if (((instr & 0xfff00f7f) != 0xee000a10)) { + UnpredictableT32(instr); + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000100: { + // 0xee000b10 + unsigned lane; + DataType dt = + Dt_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & 0xc), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{}{.} , ; T1 + vmov(CurrentCond(), + dt, + DRegisterLane(rd, lane), + Register(rt)); + if (((instr & 0xff900f1f) != 0xee000b10)) { + UnpredictableT32(instr); + } + break; + } + case 0x00800000: { + // 0xee800a10 + if ((instr & 0x00600000) == 0x00600000) { + unsigned spec_reg = (instr >> 16) & 0xf; + unsigned rt = (instr >> 12) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x8: { + // VMSR{}{} , ; T1 + vmsr(CurrentCond(), + SpecialFPRegister(spec_reg), + Register(rt)); + if (((instr & 0xfff00fff) != 0xeee00a10)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00800100: { + // 0xee800b10 + switch (instr & 0x00200040) { + case 0x00000000: { + // 0xee800b10 + DataType dt = Dt_B_E_1_Decode( + ((instr >> 5) & 0x1) | ((instr >> 21) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = 
ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VDUP{}{}.
, ; T1 + vdup(CurrentCond(), + dt, + DRegister(rd), + Register(rt)); + if (((instr & 0xffb00f5f) != 0xee800b10)) { + UnpredictableT32(instr); + } + break; + } + case 0x00200000: { + // 0xeea00b10 + DataType dt = Dt_B_E_1_Decode( + ((instr >> 5) & 0x1) | ((instr >> 21) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VDUP{}{}.
, ; T1 + vdup(CurrentCond(), + dt, + QRegister(rd), + Register(rt)); + if (((instr & 0xffb00f5f) != 0xeea00b10)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00000e00: { + // 0xee000e10 + UnimplementedT32_32("MCR", instr); + break; + } + case 0x00100a00: { + // 0xee100a10 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xee100a10 + switch (instr & 0x00e00000) { + case 0x00000000: { + // 0xee100a10 + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractSRegister(instr, 7, 16); + // VMOV{}{} , ; T1 + vmov(CurrentCond(), Register(rt), SRegister(rn)); + if (((instr & 0xfff00f7f) != 0xee100a10)) { + UnpredictableT32(instr); + } + break; + } + case 0x00e00000: { + // 0xeef00a10 + unsigned rt = (instr >> 12) & 0xf; + unsigned spec_reg = (instr >> 16) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x5: + case 0x6: + case 0x7: + case 0x8: { + // VMRS{}{} , ; T1 + vmrs(CurrentCond(), + RegisterOrAPSR_nzcv(rt), + SpecialFPRegister(spec_reg)); + if (((instr & 0xfff00fff) != 0xeef00a10)) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000100: { + // 0xee100b10 + unsigned lane; + DataType dt = + Dt_U_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & + 0xc) | + ((instr >> 19) & + 0x10), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractDRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; T1 + vmov(CurrentCond(), + dt, + Register(rt), + DRegisterLane(rn, lane)); + if (((instr & 0xff100f1f) != 0xee100b10)) { + UnpredictableT32(instr); + } + break; + } + } + break; + } + case 0x00100e00: { + // 0xee100e10 + UnimplementedT32_32("MRC", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x01000000: { + // 0xef000000 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0xef000000 + switch (instr & 0x00000f40) { + case 0x00000000: { + // 0xef000000 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHADD{}{}.
{
}, , ; T1 + vhadd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xef000040 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VHADD{}{}.
{}, , ; T1 + vhadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xef000100 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRHADD{}{}.
{
}, , ; T1 + vrhadd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xef000140 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRHADD{}{}.
{}, , ; T1 + vrhadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000200: { + // 0xef000200 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHSUB{}{}.
{
}, , ; T1 + vhsub(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000240: { + // 0xef000240 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VHSUB{}{}.
{}, , ; T1 + vhsub(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000300: { + // 0xef000300 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.
{
}, , ; T1 + vcgt(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000340: { + // 0xef000340 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.
{}, , ; T1 + vcgt(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xef000400 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VSHL{}{}.
{
}, , ; T1 + vshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + DRegister(rn)); + break; + } + case 0x00000440: { + // 0xef000440 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VSHL{}{}.
{}, , ; T1 + vshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000500: { + // 0xef000500 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VRSHL{}{}.
{
}, , ; T1 + vrshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + DRegister(rn)); + break; + } + case 0x00000540: { + // 0xef000540 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VRSHL{}{}.
{}, , ; T1 + vrshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000600: { + // 0xef000600 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{}{}.
{
}, , ; T1 + vmax(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000640: { + // 0xef000640 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{}{}.
{}, , ; T1 + vmax(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000700: { + // 0xef000700 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.
{
}, , ; T1 + vabd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000740: { + // 0xef000740 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.
{}, , ; T1 + vabd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000800: { + // 0xef000800 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000800 + DataType dt = + Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.
{
}, , ; T1 + vadd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000800 + DataType dt = + Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.
{
}, , ; T1 + vsub(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000840: { + // 0xef000840 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000840 + DataType dt = + Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.
{}, , ; T1 + vadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000840 + DataType dt = + Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.
{}, , ; T1 + vsub(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xef000900 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000900 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000900 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000940: { + // 0xef000940 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000940 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000940 + DataType dt = + Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000a00: { + // 0xef000a00 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{}{}.
{
}, , ; T1 + vpmax(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xef000b00 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000b00 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULH{}{}.
{
}, , ; T1 + vqdmulh(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000b00 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQRDMULH{}{}.
{
}, , ; T1 + vqrdmulh(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000b40: { + // 0xef000b40 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000b40 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQDMULH{}{}.
{}, , ; T1 + vqdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000b40 + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQRDMULH{}{}.
{}, , ; T1 + vqrdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000c40: { + // 0xef000c40 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000c40 + UnimplementedT32_32("SHA1C", instr); + break; + } + case 0x00100000: { + // 0xef100c40 + UnimplementedT32_32("SHA1P", instr); + break; + } + case 0x00200000: { + // 0xef200c40 + UnimplementedT32_32("SHA1M", instr); + break; + } + case 0x00300000: { + // 0xef300c40 + UnimplementedT32_32("SHA1SU0", instr); + break; + } + case 0x10000000: { + // 0xff000c40 + UnimplementedT32_32("SHA256H", instr); + break; + } + case 0x10100000: { + // 0xff100c40 + UnimplementedT32_32("SHA256H2", instr); + break; + } + case 0x10200000: { + // 0xff200c40 + UnimplementedT32_32("SHA256SU1", instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xef000d00 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.F32 {
}, , ; T1 + vadd(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.F32 {
}, , ; T1 + vsub(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.F32 {
}, , ; T1 + vpadd(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.F32 {
}, , ; T1 + vabd(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000d40: { + // 0xef000d40 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.F32 {}, , ; T1 + vadd(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.F32 {}, , ; T1 + vsub(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.F32 {}, , ; T1 + vabd(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xef000e00 + switch (instr & 0x10200000) { + case 0x00000000: { + // 0xef000e00 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 
0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
{
}, , ; T2 + vceq(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.F32 {
}, , ; T2 + vcge(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.F32 {
}, , ; T2 + vcgt(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e40: { + // 0xef000e40 + switch (instr & 0x10200000) { + case 0x00000000: { + // 0xef000e40 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , ; T2 + vceq(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.F32 {}, , ; T2 + vcge(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.F32 {}, , ; T2 + vcgt(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xef000f00 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{}{}.F32 {
}, , ; T1 + vmax(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{}{}.F32 {
}, , ; T1 + vmin(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{}{}.F32 {
}, , ; T1 + vpmax(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{}{}.F32 {
}, , ; T1 + vpmin(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f40: { + // 0xef000f40 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{}{}.F32 {}, , ; T1 + vmax(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{}{}.F32 {}, , ; T1 + vmin(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00800000: { + // 0xef800000 + switch (instr & 0x00300000) { + case 0x00300000: { + // 0xefb00000 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefb00000 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xefb00000 + if (((instr & 0x800) == 0x800)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{}{}.8 {
}, , , # ; T1 NOLINT(whitespace/line_length) + vext(CurrentCond(), + Untyped8, + DRegister(rd), + DRegister(rn), + DRegister(rm), + imm); + break; + } + case 0x00000040: { + // 0xefb00040 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{}{}.8 {}, , , # ; T1 NOLINT(whitespace/line_length) + vext(CurrentCond(), + Untyped8, + QRegister(rd), + QRegister(rn), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x10000000: { + // 0xffb00000 + switch (instr & 0x00000800) { + case 0x00000000: { + // 0xffb00000 + switch (instr & 0x00030200) { + case 0x00000000: { + // 0xffb00000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xffb00000 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VREV64{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrev64(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb00040 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VREV64{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrev64(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb00080 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VREV32{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrev32(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xffb000c0 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VREV32{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrev32(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb00100 + DataType dt = Dt_size_1_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VREV16{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrev16(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xffb00140 + DataType dt = Dt_size_1_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VREV16{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrev16(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xffb00400 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLS{}{}.
, ; T1 + vcls(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000440: { + // 0xffb00440 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLS{}{}.
, ; T1 + vcls(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000480: { + // 0xffb00480 + DataType dt = Dt_size_4_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLZ{}{}.
, ; T1 + vclz(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xffb004c0 + DataType dt = Dt_size_4_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLZ{}{}.
, ; T1 + vclz(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000500: { + // 0xffb00500 + if ((instr & 0x000c0000) == + 0x00000000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCNT{}{}.8
, ; T1 + vcnt(CurrentCond(), + Untyped8, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000540: { + // 0xffb00540 + if ((instr & 0x000c0000) == + 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCNT{}{}.8 , ; T1 + vcnt(CurrentCond(), + Untyped8, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000580: { + // 0xffb00580 + if ((instr & 0x000c0000) == + 0x00000000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMVN{}{}{.
}
, ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x000005c0: { + // 0xffb005c0 + if ((instr & 0x000c0000) == + 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VMVN{}{}{.
} , ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000200: { + // 0xffb00200 + switch (instr & 0x00000540) { + case 0x00000000: { + // 0xffb00200 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VPADDL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vpaddl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb00240 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VPADDL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vpaddl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb00300 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00300 + UnimplementedT32_32("AESE", + instr); + break; + } + case 0x00000080: { + // 0xffb00380 + UnimplementedT32_32("AESMC", + instr); + break; + } + } + break; + } + case 0x00000140: { + // 0xffb00340 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00340 + UnimplementedT32_32("AESD", + instr); + break; + } + case 0x00000080: { + // 0xffb003c0 + UnimplementedT32_32("AESIMC", + instr); + break; + } + } + break; + } + case 0x00000400: { + // 0xffb00600 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VPADAL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vpadal(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000440: { + // 0xffb00640 + DataType dt = Dt_op_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VPADAL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vpadal(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000500: { + // 0xffb00700 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00700 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VQABS{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqabs(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb00780 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VQNEG{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqneg(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000540: { + // 0xffb00740 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb00740 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQABS{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqabs(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb007c0 + DataType dt = Dt_size_5_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQNEG{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqneg(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00010000: { + // 0xffb10000 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xffb10000 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCGT{}{}.
{
}, , #0 ; T1 NOLINT(whitespace/line_length) + vcgt(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xffb10040 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCGT{}{}.
{}, , #0 ; T1 NOLINT(whitespace/line_length) + vcgt(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000080: { + // 0xffb10080 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCGE{}{}.
{
}, , #0 ; T1 NOLINT(whitespace/line_length) + vcge(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xffb100c0 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCGE{}{}.
{}, , #0 ; T1 NOLINT(whitespace/line_length) + vcge(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000100: { + // 0xffb10100 + DataType dt = Dt_F_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
{
}, , #0 ; T1 NOLINT(whitespace/line_length) + vceq(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000140: { + // 0xffb10140 + DataType dt = Dt_F_size_2_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , #0 ; T1 NOLINT(whitespace/line_length) + vceq(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000180: { + // 0xffb10180 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLE{}{}.
{
}, , #0 ; T1 NOLINT(whitespace/line_length) + vcle(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000001c0: { + // 0xffb101c0 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLE{}{}.
{}, , #0 ; T1 NOLINT(whitespace/line_length) + vcle(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + } + break; + } + case 0x00010200: { + // 0xffb10200 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xffb10200 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCLT{}{}.
{
}, , #0 ; T1 NOLINT(whitespace/line_length) + vclt(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xffb10240 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCLT{}{}.
{}, , #0 ; T1 NOLINT(whitespace/line_length) + vclt(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xffb102c0 + if ((instr & 0x000c0400) == + 0x00080000) { + UnimplementedT32_32("SHA1H", instr); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000100: { + // 0xffb10300 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VABS{}{}.
, ; T1 + vabs(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xffb10340 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VABS{}{}.
, ; T1 + vabs(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000180: { + // 0xffb10380 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VNEG{}{}.
, ; T1 + vneg(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xffb103c0 + DataType dt = Dt_F_size_1_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VNEG{}{}.
, ; T1 + vneg(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00020000: { + // 0xffb20000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xffb20000 + if ((instr & 0x000c0000) == + 0x00000000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VSWP{}{}{.
}
, ; T1 NOLINT(whitespace/line_length) + vswp(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000040: { + // 0xffb20040 + if ((instr & 0x000c0000) == + 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VSWP{}{}{.
} , ; T1 NOLINT(whitespace/line_length) + vswp(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000080: { + // 0xffb20080 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VTRN{}{}.
, ; T1 + vtrn(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xffb200c0 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VTRN{}{}.
, ; T1 + vtrn(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb20100 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VUZP{}{}.
, ; T1 + vuzp(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0xffb20140 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VUZP{}{}.
, ; T1 + vuzp(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000180: { + // 0xffb20180 + DataType dt = Dt_size_15_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VZIP{}{}.
, ; T1 + vzip(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xffb201c0 + DataType dt = Dt_size_7_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VZIP{}{}.
, ; T1 + vzip(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xffb20400 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTN{}.
, ; T1 + vrintn(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000440: { + // 0xffb20440 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTN{}.
, ; T1 + vrintn(dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000480: { + // 0xffb20480 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTX{}.
, ; T1 + vrintx(Condition::None(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xffb204c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTX{}.
, ; T1 + vrintx(dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000500: { + // 0xffb20500 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTA{}.
, ; T1 + vrinta(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000540: { + // 0xffb20540 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTA{}.
, ; T1 + vrinta(dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000580: { + // 0xffb20580 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTZ{}.
, ; T1 + vrintz(Condition::None(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x000005c0: { + // 0xffb205c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTZ{}.
, ; T1 + vrintz(dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00020200: { + // 0xffb20200 + switch (instr & 0x00000580) { + case 0x00000000: { + // 0xffb20200 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xffb20200 + DataType dt = Dt_size_3_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VMOVN{}{}.
, ; T1 NOLINT(whitespace/line_length) + vmovn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb20240 + DataType dt = Dt_size_14_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQMOVUN{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqmovun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000080: { + // 0xffb20280 + DataType dt = Dt_op_size_3_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 4) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VQMOVN{}{}.
, ; T1 NOLINT(whitespace/line_length) + vqmovn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffb20300 + if ((instr & 0x00000040) == + 0x00000000) { + DataType dt = Dt_size_17_Decode( + (instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm = dt.GetSize(); + // VSHLL{}{}. , , # ; T2 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000180: { + // 0xffb20380 + switch (instr & 0x000c0040) { + case 0x00080000: { + // 0xffba0380 + UnimplementedT32_32("SHA1SU1", + instr); + break; + } + case 0x00080040: { + // 0xffba03c0 + UnimplementedT32_32("SHA256SU0", + instr); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xffb20600 + if ((instr & 0x000c0040) == + 0x00040000) { + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVT{}{}.F16.F32
, ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + F16, + F32, + DRegister(rd), + QRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000480: { + // 0xffb20680 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xffb20680 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTM{}.
, ; T1 NOLINT(whitespace/line_length) + vrintm(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb206c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTM{}.
, ; T1 NOLINT(whitespace/line_length) + vrintm(dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000500: { + // 0xffb20700 + if ((instr & 0x000c0040) == + 0x00040000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVT{}{}.F32.F16 , ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + F32, + F16, + QRegister(rd), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000580: { + // 0xffb20780 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xffb20780 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRINTP{}.
, ; T1 NOLINT(whitespace/line_length) + vrintp(dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb207c0 + DataType dt = Dt_size_16_Decode( + (instr >> 18) & 0x3); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRINTP{}.
, ; T1 NOLINT(whitespace/line_length) + vrintp(dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030000: { + // 0xffb30000 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xffb30000 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xffbb0000 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTA{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvta(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080100: { + // 0xffbb0100 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTN{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvtn(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000040: { + // 0xffb30040 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xffbb0040 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTA{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvta(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00080100: { + // 0xffbb0140 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTN{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvtn(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xffb30400 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb30400 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRECPE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrecpe(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb30480 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrsqrte(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xffb30440 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xffb30440 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRECPE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrecpe(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xffb304c0 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; T1 NOLINT(whitespace/line_length) + vrsqrte(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030200: { + // 0xffb30200 + switch (instr & 0x000c0440) { + case 0x00080000: { + // 0xffbb0200 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xffbb0200 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTP{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvtp(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xffbb0300 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVTM{}.
.F32
, ; T1 NOLINT(whitespace/line_length) + vcvtm(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00080040: { + // 0xffbb0240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xffbb0240 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTP{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvtp(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xffbb0340 + DataType dt = Dt_op_3_Decode( + (instr >> 7) & 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVTM{}.
.F32 , ; T1 NOLINT(whitespace/line_length) + vcvtm(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00080400: { + // 0xffbb0600 + DataType dt1 = Dt_op_1_Decode1( + (instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DataType dt2 = Dt_op_1_Decode2( + (instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VCVT{}{}.
.
, ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080440: { + // 0xffbb0640 + DataType dt1 = Dt_op_1_Decode1( + (instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DataType dt2 = Dt_op_1_Decode2( + (instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VCVT{}{}.
.
, ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00000800: { + // 0xffb00800 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xffb00800 + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned first = + ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VTBL{}{}.8
, , ; T1 NOLINT(whitespace/line_length) + vtbl(CurrentCond(), + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xffb00840 + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned first = + ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VTBX{}{}.8
, , ; T1 NOLINT(whitespace/line_length) + vtbx(CurrentCond(), + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000400: { + // 0xffb00c00 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & + 0xf, + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
, ; T1 NOLINT(whitespace/line_length) + vdup(CurrentCond(), + dt, + DRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000440: { + // 0xffb00c40 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & + 0xf, + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
, ; T1 NOLINT(whitespace/line_length) + vdup(CurrentCond(), + dt, + QRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000c40) { + case 0x00000000: { + // 0xef800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800000 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDL{}{}.
, , ; T1 + vaddl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xef800100 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDW{}{}.
{}, , ; T1 NOLINT(whitespace/line_length) + vaddw(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xef800200 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBL{}{}.
, , ; T1 + vsubl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xef800300 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBW{}{}.
{}, , ; T1 NOLINT(whitespace/line_length) + vsubw(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000040: { + // 0xef800040 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xef800040 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLA{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000000: { + // 0xff800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLA{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmla(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800240 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLAL{}{}. 
, , ; T1 NOLINT(whitespace/line_length) + vmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xef800340 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_size_13_Decode( + (instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = (instr & 0xf) | + ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLAL{}{}.
, , [] ; T2 NOLINT(whitespace/line_length) + vqdmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xef800400 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800400 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VADDHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vaddhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRADDHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vraddhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000100: { + // 0xef800500 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABAL{}{}.
, , ; T1 + vabal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xef800600 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VSUBHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vsubhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + // VRSUBHN{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vrsubhn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000300: { + // 0xef800700 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABDL{}{}.
, , ; T1 + vabdl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xef800440 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xef800440 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLS{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000000: { + // 0xff800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLS{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmls(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800640 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800640 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VMLSL{}{}. 
, , ; T1 NOLINT(whitespace/line_length) + vmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xef800740 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_size_13_Decode( + (instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = (instr & 0xf) | + ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLSL{}{}.
, , [] ; T2 NOLINT(whitespace/line_length) + vqdmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xef800800 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800800 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLAL{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xef800900 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLAL{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vqdmlal(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000200: { + // 0xef800a00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLSL{}{}. , , ; T1 NOLINT(whitespace/line_length) + vmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xef800b00 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLSL{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vqdmlsl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + case 0x00000840: { + // 0xef800840 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xef800840 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
{
}, , [] ; T1 NOLINT(whitespace/line_length) + vmul(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x10000000: { + // 0xff800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = + ExtractQRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
{}, , [] ; T1 NOLINT(whitespace/line_length) + vmul(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegister(vm), + index); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800a40 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800a40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_size_2_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMULL{}{}.
, , [] ; T1 NOLINT(whitespace/line_length) + vmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x00000100: { + // 0xef800b40 + if ((instr & 0x10000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_size_13_Decode( + (instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULL{}{}.
, , ; T2 NOLINT(whitespace/line_length) + vqdmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000c00: { + // 0xef800c00 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800c00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_op_U_size_1_Decode( + ((instr >> 20) & 0x3) | + ((instr >> 26) & 0x4) | + ((instr >> 6) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMULL{}{}.
, , ; T1 + vmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xef800d00 + if ((instr & 0x10000200) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rn = + ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULL{}{}.
, , ; T1 NOLINT(whitespace/line_length) + vqdmull(CurrentCond(), + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + case 0x00000c40: { + // 0xef800c40 + switch (instr & 0x10000300) { + case 0x00000000: { + // 0xef800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULH{}{}.
{
}, , ; T2 NOLINT(whitespace/line_length) + vqdmulh(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xef800d40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQRDMULH{}{}.
{
}, , ; T2 NOLINT(whitespace/line_length) + vqrdmulh(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000000: { + // 0xff800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULH{}{}.
{}, , ; T2 NOLINT(whitespace/line_length) + vqdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x10000100: { + // 0xff800d40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQRDMULH{}{}.
{}, , ; T2 NOLINT(whitespace/line_length) + vqrdmulh(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x01000010: { + // 0xef000010 + switch (instr & 0x00800040) { + case 0x00000000: { + // 0xef000010 + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xef000010 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQADD{}{}.
{
}, , ; T1 + vqadd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xef000110 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VAND{}{}{.
} {
}, , ; T1 + vand(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00100000: { + // 0xef100110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIC{}{}{.
} {
}, , ; T1 + vbic(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200110 + if (((instr & 0x00000040) == 0x00000000) && + ((((Uint32((instr >> 7)) & Uint32(0x1)) + << 4) | + (Uint32((instr >> 16)) & Uint32(0xf))) == + (((Uint32((instr >> 5)) & Uint32(0x1)) + << 4) | + (Uint32(instr) & Uint32(0xf))))) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 7, 16); + // VMOV{}{}{.
}
, ; T1 + vmov(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VORR{}{}{.
} {
}, , ; T1 + vorr(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00300000: { + // 0xef300110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VORN{}{}{.
} {
}, , ; T1 + vorn(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VEOR{}{}{.
} {
}, , ; T1 + veor(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10100000: { + // 0xff100110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBSL{}{}{.
} {
}, , ; T1 + vbsl(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIT{}{}{.
} {
}, , ; T1 + vbit(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10300000: { + // 0xff300110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIF{}{}{.
} {
}, , ; T1 + vbif(CurrentCond(), + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef000210 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQSUB{}{}.
{
}, , ; T1 + vqsub(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xef000310 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.
{
}, , ; T1 + vcge(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000400: { + // 0xef000410 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQSHL{}{}.
{
}, , ; T1 + vqshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + DRegister(rn)); + break; + } + case 0x00000500: { + // 0xef000510 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQRSHL{}{}.
{
}, , ; T1 + vqrshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + DRegister(rn)); + break; + } + case 0x00000600: { + // 0xef000610 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{}{}.
{
}, , ; T1 + vmin(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000700: { + // 0xef000710 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABA{}{}.
, , ; T1 + vaba(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000800: { + // 0xef000810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000810 + DataType dt = + Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTST{}{}.
{
}, , ; T1 + vtst(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000810 + DataType dt = + Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
{
}, , ; T1 + vceq(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xef000910 + DataType dt = Dt_op_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.
{
}, , ; T1 + vmul(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000a00: { + // 0xef000a10 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{}{}.
{
}, , ; T1 + vpmin(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xef000b10 + if ((instr & 0x10000000) == 0x00000000) { + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.
{
}, , ; T1 + vpadd(CurrentCond(), + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000c00: { + // 0xef000c10 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000c10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMA{}{}.F32
, , ; T1 + vfma(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200c10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMS{}{}.F32
, , ; T1 + vfms(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xef000d10 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.F32
, , ; T1 + vmla(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.F32
, , ; T1 + vmls(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.F32 {
}, , ; T1 + vmul(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xef000e10 + switch (instr & 0x10300000) { + case 0x10000000: { + // 0xff000e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VACGE{}{}.F32 {
}, , ; T1 + vacge(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VACGT{}{}.F32 {
}, , ; T1 + vacgt(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xef000f10 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRECPS{}{}.F32 {
}, , ; T1 + vrecps(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {
}, , ; T1 + vrsqrts(CurrentCond(), + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAXNM{}.F32
, , ; T1 + vmaxnm(F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMINNM{}.F32
, , ; T1 + vminnm(F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00000040: { + // 0xef000050 + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xef000050 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQADD{}{}.
{}, , ; T1 + vqadd(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xef000150 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VAND{}{}{.
} {}, , ; T1 + vand(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00100000: { + // 0xef100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIC{}{}{.
} {}, , ; T1 + vbic(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200150 + if (((instr & 0x00000040) == 0x00000040) && + ((((Uint32((instr >> 7)) & Uint32(0x1)) + << 4) | + (Uint32((instr >> 16)) & Uint32(0xf))) == + (((Uint32((instr >> 5)) & Uint32(0x1)) + << 4) | + (Uint32(instr) & Uint32(0xf))))) { + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; T1 + vmov(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORR{}{}{.
} {}, , ; T1 + vorr(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00300000: { + // 0xef300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORN{}{}{.
} {}, , ; T1 + vorn(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VEOR{}{}{.
} {}, , ; T1 + veor(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10100000: { + // 0xff100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBSL{}{}{.
} {}, , ; T1 + vbsl(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIT{}{}{.
} {}, , ; T1 + vbit(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10300000: { + // 0xff300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIF{}{}{.
} {}, , ; T1 + vbif(CurrentCond(), + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000200: { + // 0xef000250 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQSUB{}{}.
{}, , ; T1 + vqsub(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000300: { + // 0xef000350 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.
{}, , ; T1 + vcge(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000400: { + // 0xef000450 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQSHL{}{}.
{}, , ; T1 + vqshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000500: { + // 0xef000550 + DataType dt = Dt_U_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQRSHL{}{}.
{}, , ; T1 + vqrshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + QRegister(rn)); + break; + } + case 0x00000600: { + // 0xef000650 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{}{}.
{}, , ; T1 + vmin(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000700: { + // 0xef000750 + DataType dt = Dt_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABA{}{}.
, , ; T1 + vaba(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000800: { + // 0xef000850 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef000850 + DataType dt = + Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VTST{}{}.
{}, , ; T1 + vtst(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000850 + DataType dt = + Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , ; T1 + vceq(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xef000950 + DataType dt = Dt_op_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 26) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.
{}, , ; T1 + vmul(CurrentCond(), + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00000c00: { + // 0xef000c50 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; T1 + vfma(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; T1 + vfms(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xef000d50 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; T1 + vmla(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 
12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; T1 + vmls(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.F32 {}, , ; T1 + vmul(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xef000e50 + switch (instr & 0x10300000) { + case 0x10000000: { + // 0xff000e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGE{}{}.F32 {}, , ; T1 + vacge(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10200000: { + // 0xff200e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGT{}{}.F32 {}, , ; T1 + vacgt(CurrentCond(), + F32, + QRegister(rd), + 
QRegister(rn), + QRegister(rm)); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xef000f50 + switch (instr & 0x10300000) { + case 0x00000000: { + // 0xef000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRECPS{}{}.F32 {}, , ; T1 + vrecps(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xef200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {}, , ; T1 + vrsqrts(CurrentCond(), + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x10000000: { + // 0xff000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAXNM{}.F32 , , ; T1 + vmaxnm(F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + case 0x10200000: { + // 0xff200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedT32(instr); + return; + } 
+ unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMINNM{}.F32 , , ; T1 + vminnm(F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + if (InITBlock()) { + UnpredictableT32(instr); + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00800000: { + // 0xef800010 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xef800010 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800010 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800010 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800030 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800110 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800110 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800130 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800010 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSHR{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vshr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000100: { + // 0xef800110 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRA{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vsra(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef800210 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSHR{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vrshr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800310 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSRA{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vrsra(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xef800410 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800410 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800410 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800430 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800510 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800510 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800530 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800410 + if ((instr & 0x10000000) == 0x10000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
{
}, , # ; T1 NOLINT(whitespace/line_length) + vsri(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000100: { + // 0xef800510 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800510 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_3_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - + (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSHL{}{}.I {
}, , # ; T1 NOLINT(whitespace/line_length) + vshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800510 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - + (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{}{}.
{
}, , # ; T1 NOLINT(whitespace/line_length) + vsli(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800610 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vqshlu(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800710 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{}{}. {
}, , # ; T1 NOLINT(whitespace/line_length) + vqshl(CurrentCond(), + dt, + DRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xef800810 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800830 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800910 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800910 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800930 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00180000: { + // 0xef980810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef980810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xef980910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00280000: { + // 0xefa80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xefa80810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefa80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xffa80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xefa80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xefa80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00300000: { + // 0xefb00810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xefb00810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefb00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xffb00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xefb00910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xefb00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00380000: { + // 0xefb80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xefb80810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xefb80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xffb80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xefb80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xefb80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800810 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xef800910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef800a10 + switch (instr & 0x00070000) { + case 0x00000000: { + // 0xef800a10 + switch (instr & 0x003f0000) { + case 0x00080000: { + // 0xef880a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == + 0x180000) || + ((instr & 0x380000) == + 0x280000) || + ((instr & 0x380000) == + 0x300000) || + ((instr & 0x380000) == + 0x380000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 25) & 0x8)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vmovl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00090000: { + // 0xef890a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000a0000: { + // 0xef8a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000b0000: { + // 0xef8b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000c0000: { + // 0xef8c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000d0000: { + // 0xef8d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000e0000: { + // 0xef8e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000f0000: { + // 0xef8f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00100000: { + // 0xef900a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == + 0x180000) || + ((instr & 0x380000) == + 0x280000) || + ((instr & 0x380000) == + 0x300000) || + ((instr & 0x380000) == + 0x380000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 25) & 0x8)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vmovl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00110000: { + // 0xef910a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00120000: { + // 0xef920a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00130000: { + // 0xef930a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00140000: { + // 0xef940a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00150000: { + // 0xef950a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00160000: { + // 0xef960a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00170000: { + // 0xef970a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00180000: { + // 0xef980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00190000: { + // 0xef990a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001a0000: { + // 0xef9a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001b0000: { + // 0xef9b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001c0000: { + // 0xef9c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001d0000: { + // 0xef9d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001e0000: { + // 0xef9e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001f0000: { + // 0xef9f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00200000: { + // 0xefa00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == + 0x180000) || + ((instr & 0x380000) == + 0x280000) || + ((instr & 0x380000) == + 0x300000) || + ((instr & 0x380000) == + 0x380000)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 25) & 0x8)); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; T1 NOLINT(whitespace/line_length) + vmovl(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00210000: { + // 0xefa10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00220000: { + // 0xefa20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00230000: { + // 0xefa30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00240000: { + // 0xefa40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00250000: { + // 0xefa50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00260000: { + // 0xefa60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00270000: { + // 0xefa70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00280000: { + // 0xefa80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00290000: { + // 0xefa90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002a0000: { + // 0xefaa0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002b0000: { + // 0xefab0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002c0000: { + // 0xefac0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002d0000: { + // 0xefad0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002e0000: { + // 0xefae0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002f0000: { + // 0xefaf0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00300000: { + // 0xefb00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00310000: { + // 0xefb10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00320000: { + // 0xefb20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00330000: { + // 0xefb30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00340000: { + // 0xefb40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00350000: { + // 0xefb50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00360000: { + // 0xefb60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00370000: { + // 0xefb70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00380000: { + // 0xefb80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00390000: { + // 0xefb90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003a0000: { + // 0xefba0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003b0000: { + // 0xefbb0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003c0000: { + // 0xefbc0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003d0000: { + // 0xefbd0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003e0000: { + // 0xefbe0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003f0000: { + // 0xefbf0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == + 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> + 19) & + 0x7, + (instr >> + 28) & + 0x1); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = + (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == + 0x100000) || + ((instr & 0x3f0000) == + 0x200000)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; T1 NOLINT(whitespace/line_length) + vshll(CurrentCond(), + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xef800c10 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800c10 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xef800c10 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xef800c10 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xef800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000300: { + // 0xef800f10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800c30 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000200: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000220: { + // 0xef800e30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000400: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000420: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000600: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000620: { + // 0xef800e30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000800: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000820: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000a00: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000a20: { + // 0xef800e30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000c00: { + // 0xef800c10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000c20: { + // 0xef800c30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000d00: { + // 0xef800d10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000d20: { + // 0xef800d30 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000e00: { + // 0xef800e10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000e20: { + // 0xef800e30 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + case 0x00000f00: { + // 0xef800f10 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, + 22, + 12); + DOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if ((instr & 0x00000200) == 0x00000200) { + if (((instr & 0x200000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt1 = Dt_op_U_1_Decode1( + ((instr >> 28) & 0x1) | + ((instr >> 7) & 0x2)); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DataType dt2 = Dt_op_U_1_Decode2( + ((instr >> 28) & 0x1) | + ((instr >> 7) & 0x2)); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t fbits = + 64 - ((instr >> 16) & 0x3f); + // VCVT{}{}.
.
, , # ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + DRegister(rd), + DRegister(rm), + fbits); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + case 0x00800040: { + // 0xef800050 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xef800050 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800050 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800050 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800070 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800150 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800150 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800170 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800050 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSHR{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vshr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000100: { + // 0xef800150 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRA{}{}. 
{}, , # ; T1 NOLINT(whitespace/line_length) + vsra(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xef800250 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSHR{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vrshr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800350 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VRSRA{}{}. 
{}, , # ; T1 NOLINT(whitespace/line_length) + vrsra(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xef800450 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xef800450 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800450 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800470 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800550 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800550 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800570 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800450 + if ((instr & 0x10000000) == 0x10000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 + : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vsri(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + } else { + UnallocatedT32(instr); + } + break; + } + case 0x00000100: { + // 0xef800550 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_3_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - + (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSHL{}{}.I {}, , # ; T1 NOLINT(whitespace/line_length) + vshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - + (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vsli(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xef800650 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{}{}. {}, , # ; T1 NOLINT(whitespace/line_length) + vqshlu(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000300: { + // 0xef800750 + if (((instr & 0x380080) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & + 0x8), + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{}{}. 
{}, , # ; T1 NOLINT(whitespace/line_length) + vqshl(CurrentCond(), + dt, + QRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xef800850 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800870 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xef800950 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xef800950 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VORR{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vorr(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800970 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VBIC{}{}.
{}, , # ; T1 NOLINT(whitespace/line_length) + vbic(CurrentCond(), + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xef800850 + switch (instr & 0x10000000) { + case 0x00000000: { + // 0xef800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = Dt_imm6_3_Decode( + (instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VRSHRN{}{}.I
, , # ; T1 NOLINT(whitespace/line_length) + vrshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x10000000: { + // 0xff800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & + 0x7, + (instr >> 28) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRUN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqrshrun(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xef800950 + if (((instr & 0x380000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 28) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = + ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRN{}{}.
, , # ; T1 NOLINT(whitespace/line_length) + vqrshrn(CurrentCond(), + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xef800c50 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xef800c50 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xef800c50 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xef800c50 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xef800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000300: { + // 0xef800f50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 21) & 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xef800c70 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000200: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000220: { + // 0xef800e70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000400: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000420: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000600: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000620: { + // 0xef800e70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000800: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000820: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000a00: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000a20: { + // 0xef800e70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000c00: { + // 0xef800c50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000c20: { + // 0xef800c70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000d00: { + // 0xef800d50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000d20: { + // 0xef800d70 + if (((instr & 0xd00) == + 0x100) || + ((instr & 0xd00) == + 0x500) || + ((instr & 0xd00) == + 0x900) || + ((instr & 0xe00) == + 0xe00)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmvn:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMVN{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmvn(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000e00: { + // 0xef800e50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000e20: { + // 0xef800e70 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + case 0x00000f00: { + // 0xef800f50 + if (((instr & 0x920) == + 0x100) || + ((instr & 0x520) == + 0x100) || + ((instr & 0x820) == + 0x20) || + ((instr & 0x420) == + 0x20) || + ((instr & 0x220) == + 0x20) || + ((instr & 0x120) == + 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != + 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & + 0x70) | + ((instr >> 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedT32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt( + cmode); + if (dt.Is( + kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, + 22, + 12); + QOperand imm = ImmediateVmov:: + DecodeImmediate(cmode, + (instr & + 0xf) | + ((instr >> + 12) & + 0x70) | + ((instr >> + 21) & + 0x80)); + // VMOV{}{}.
, # ; T1 NOLINT(whitespace/line_length) + vmov(CurrentCond(), + dt, + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + default: { + if ((instr & 0x00000200) == 0x00000200) { + if (((instr & 0x200000) == 0x0)) { + UnallocatedT32(instr); + return; + } + DataType dt1 = Dt_op_U_1_Decode1( + ((instr >> 28) & 0x1) | + ((instr >> 7) & 0x2)); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + DataType dt2 = Dt_op_U_1_Decode2( + ((instr >> 28) & 0x1) | + ((instr >> 7) & 0x2)); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedT32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedT32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t fbits = + 64 - ((instr >> 16) & 0x3f); + // VCVT{}{}.
.
, , # ; T1 NOLINT(whitespace/line_length) + vcvt(CurrentCond(), + dt1, + dt2, + QRegister(rd), + QRegister(rm), + fbits); + } else { + UnallocatedT32(instr); + } + break; + } + } + break; + } + default: + UnallocatedT32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + } +} // NOLINT(readability/fn_size) + +void Disassembler::DecodeA32(uint32_t instr) { + A32CodeAddressIncrementer incrementer(&code_address_); + if ((instr & 0xf0000000) == 0xf0000000) { + switch (instr & 0x0e000000) { + case 0x00000000: { + // 0xf0000000 + switch (instr & 0x01f10020) { + case 0x01000000: { + // 0xf1000000 + switch (instr & 0x000e0000) { + case 0x00020000: { + // 0xf1020000 + if ((instr & 0x000001c0) == 0x00000000) { + UnimplementedA32("CPS", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00080000: { + // 0xf1080000 + if ((instr & 0x0000001f) == 0x00000000) { + UnimplementedA32("CPSIE", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0xf10a0000 + UnimplementedA32("CPSIE", instr); + break; + } + case 0x000c0000: { + // 0xf10c0000 + if ((instr & 0x0000001f) == 0x00000000) { + UnimplementedA32("CPSID", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000e0000: { + // 0xf10e0000 + UnimplementedA32("CPSID", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01010000: { + // 0xf1010000 + if ((instr & 0x000000d0) == 0x00000000) { + UnimplementedA32("SETEND", instr); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x02000000: { + // 0xf2000000 + switch (instr & 0x00800010) { + case 0x00000000: { + // 0xf2000000 + switch (instr & 0x00000f40) { + case 0x00000000: { + // 0xf2000000 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + 
return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHADD{}{}.
{
}, , ; A1 + vhadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf2000040 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VHADD{}{}.
{}, , ; A1 + vhadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2000100 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRHADD{}{}.
{
}, , ; A1 + vrhadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf2000140 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRHADD{}{}.
{}, , ; A1 + vrhadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000200: { + // 0xf2000200 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VHSUB{}{}.
{
}, , ; A1 + vhsub(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000240: { + // 0xf2000240 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VHSUB{}{}.
{}, , ; A1 + vhsub(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2000300 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.
{
}, , ; A1 + vcgt(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000340: { + // 0xf2000340 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.
{}, , ; A1 + vcgt(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf2000400 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VSHL{}{}.
{
}, , ; A1 + vshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000440: { + // 0xf2000440 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VSHL{}{}.
{}, , ; A1 + vshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000500: { + // 0xf2000500 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VRSHL{}{}.
{
}, , ; A1 + vrshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000540: { + // 0xf2000540 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VRSHL{}{}.
{}, , ; A1 + vrshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000600: { + // 0xf2000600 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{}{}.
{
}, , ; A1 + vmax(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000640: { + // 0xf2000640 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{}{}.
{}, , ; A1 + vmax(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000700: { + // 0xf2000700 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.
{
}, , ; A1 + vabd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000740: { + // 0xf2000740 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.
{}, , ; A1 + vabd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000800: { + // 0xf2000800 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000800 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.
{
}, , ; A1 + vadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000800 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.
{
}, , ; A1 + vsub(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + } + break; + } + case 0x00000840: { + // 0xf2000840 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000840 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.
{}, , ; A1 + vadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000840 + DataType dt = Dt_size_2_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.
{}, , ; A1 + vsub(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xf2000900 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000900 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.
, , ; A1 + vmla(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000900 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.
, , ; A1 + vmls(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + } + break; + } + case 0x00000940: { + // 0xf2000940 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000940 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}. , , ; A1 + vmla(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000940 + DataType dt = Dt_size_10_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}. , , ; A1 + vmls(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + } + break; + } + case 0x00000a00: { + // 0xf2000a00 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{}{}.
{
}, , ; A1 + vpmax(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xf2000b00 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000b00 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULH{}{}.
{
}, , ; A1 + vqdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000b00 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQRDMULH{}{}.
{
}, , ; A1 + vqrdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000b40: { + // 0xf2000b40 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000b40 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQDMULH{}{}.
{}, , ; A1 + vqdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000b40 + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQRDMULH{}{}.
{}, , ; A1 + vqrdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000c40: { + // 0xf2000c40 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000c40 + UnimplementedA32("SHA1C", instr); + break; + } + case 0x00100000: { + // 0xf2100c40 + UnimplementedA32("SHA1P", instr); + break; + } + case 0x00200000: { + // 0xf2200c40 + UnimplementedA32("SHA1M", instr); + break; + } + case 0x00300000: { + // 0xf2300c40 + UnimplementedA32("SHA1SU0", instr); + break; + } + case 0x01000000: { + // 0xf3000c40 + UnimplementedA32("SHA256H", instr); + break; + } + case 0x01100000: { + // 0xf3100c40 + UnimplementedA32("SHA256H2", instr); + break; + } + case 0x01200000: { + // 0xf3200c40 + UnimplementedA32("SHA256SU1", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xf2000d00 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.F32 {
}, , ; A1 + vadd(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.F32 {
}, , ; A1 + vsub(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.F32 {
}, , ; A1 + vpadd(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200d00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABD{}{}.F32 {
}, , ; A1 + vabd(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d40: { + // 0xf2000d40 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADD{}{}.F32 {}, , ; A1 + vadd(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUB{}{}.F32 {}, , ; A1 + vsub(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200d40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABD{}{}.F32 {}, , ; A1 + vabd(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xf2000e00 + switch (instr & 0x01200000) { + case 0x00000000: { + // 0xf2000e00 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + 
} + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
{
}, , ; A2 + vceq(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.F32 {
}, , ; A2 + vcge(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x01200000: { + // 0xf3200e00 + if ((instr & 0x00100000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.F32 {
}, , ; A2 + vcgt(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e40: { + // 0xf2000e40 + switch (instr & 0x01200000) { + case 0x00000000: { + // 0xf2000e40 + DataType dt = Dt_sz_1_Decode((instr >> 20) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , ; A2 + vceq(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.F32 {}, , ; A2 + vcge(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x01200000: { + // 0xf3200e40 + if ((instr & 0x00100000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.F32 {}, , ; A2 + vcgt(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xf2000f00 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAX{}{}.F32 {
}, , ; A1 + vmax(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{}{}.F32 {
}, , ; A1 + vmin(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMAX{}{}.F32 {
}, , ; A1 + vpmax(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200f00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{}{}.F32 {
}, , ; A1 + vpmin(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f40: { + // 0xf2000f40 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAX{}{}.F32 {}, , ; A1 + vmax(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f40 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{}{}.F32 {}, , ; A1 + vmin(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000010: { + // 0xf2000010 + switch (instr & 0x00000f40) { + case 0x00000000: { + // 0xf2000010 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQADD{}{}.
{
}, , ; A1 + vqadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf2000050 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQADD{}{}.
{}, , ; A1 + vqadd(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2000110 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VAND{}{}{.
} {
}, , ; A1 + vand(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00100000: { + // 0xf2100110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIC{}{}{.
} {
}, , ; A1 + vbic(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200110 + if (((instr & 0x00000040) == 0x00000000) && + ((((Uint32((instr >> 7)) & Uint32(0x1)) << 4) | + (Uint32((instr >> 16)) & Uint32(0xf))) == + (((Uint32((instr >> 5)) & Uint32(0x1)) << 4) | + (Uint32(instr) & Uint32(0xf))))) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 7, 16); + // VMOV{}{}{.
}
, ; A1 + vmov(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VORR{}{}{.
} {
}, , ; A1 + vorr(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00300000: { + // 0xf2300110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VORN{}{}{.
} {
}, , ; A1 + vorn(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VEOR{}{}{.
} {
}, , ; A1 + veor(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01100000: { + // 0xf3100110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBSL{}{}{.
} {
}, , ; A1 + vbsl(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIT{}{}{.
} {
}, , ; A1 + vbit(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01300000: { + // 0xf3300110 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VBIF{}{}{.
} {
}, , ; A1 + vbif(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000140: { + // 0xf2000150 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VAND{}{}{.
} {}, , ; A1 + vand(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00100000: { + // 0xf2100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIC{}{}{.
} {}, , ; A1 + vbic(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200150 + if (((instr & 0x00000040) == 0x00000040) && + ((((Uint32((instr >> 7)) & Uint32(0x1)) << 4) | + (Uint32((instr >> 16)) & Uint32(0xf))) == + (((Uint32((instr >> 5)) & Uint32(0x1)) << 4) | + (Uint32(instr) & Uint32(0xf))))) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; A1 + vmov(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORR{}{}{.
} {}, , ; A1 + vorr(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00300000: { + // 0xf2300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VORN{}{}{.
} {}, , ; A1 + vorn(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VEOR{}{}{.
} {}, , ; A1 + veor(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01100000: { + // 0xf3100150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBSL{}{}{.
} {}, , ; A1 + vbsl(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIT{}{}{.
} {}, , ; A1 + vbit(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01300000: { + // 0xf3300150 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VBIF{}{}{.
} {}, , ; A1 + vbif(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2000210 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQSUB{}{}.
{
}, , ; A1 + vqsub(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000240: { + // 0xf2000250 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQSUB{}{}.
{}, , ; A1 + vqsub(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2000310 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.
{
}, , ; A1 + vcge(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000340: { + // 0xf2000350 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.
{}, , ; A1 + vcge(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf2000410 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQSHL{}{}.
{
}, , ; A1 + vqshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000440: { + // 0xf2000450 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQSHL{}{}.
{}, , ; A1 + vqshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000500: { + // 0xf2000510 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + unsigned rn = ExtractDRegister(instr, 7, 16); + // VQRSHL{}{}.
{
}, , ; A1 + vqrshl(al, dt, DRegister(rd), DRegister(rm), DRegister(rn)); + break; + } + case 0x00000540: { + // 0xf2000550 + DataType dt = Dt_U_size_3_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + // VQRSHL{}{}.
{}, , ; A1 + vqrshl(al, dt, QRegister(rd), QRegister(rm), QRegister(rn)); + break; + } + case 0x00000600: { + // 0xf2000610 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMIN{}{}.
{
}, , ; A1 + vmin(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000640: { + // 0xf2000650 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMIN{}{}.
{}, , ; A1 + vmin(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000700: { + // 0xf2000710 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABA{}{}.
, , ; A1 + vaba(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000740: { + // 0xf2000750 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABA{}{}.
, , ; A1 + vaba(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000800: { + // 0xf2000810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000810 + DataType dt = Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTST{}{}.
{
}, , ; A1 + vtst(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000810 + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
{
}, , ; A1 + vceq(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + } + break; + } + case 0x00000840: { + // 0xf2000850 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2000850 + DataType dt = Dt_size_7_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VTST{}{}.
{}, , ; A1 + vtst(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000850 + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , ; A1 + vceq(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + } + break; + } + case 0x00000900: { + // 0xf2000910 + DataType dt = Dt_op_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.
{
}, , ; A1 + vmul(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000940: { + // 0xf2000950 + DataType dt = Dt_op_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.
{}, , ; A1 + vmul(al, dt, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00000a00: { + // 0xf2000a10 + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPMIN{}{}.
{
}, , ; A1 + vpmin(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00000b00: { + // 0xf2000b10 + if ((instr & 0x01000000) == 0x00000000) { + DataType dt = Dt_size_4_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADD{}{}.
{
}, , ; A1 + vpadd(al, dt, DRegister(rd), DRegister(rn), DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000c00: { + // 0xf2000c10 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000c10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMA{}{}.F32
, , ; A1 + vfma(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200c10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMS{}{}.F32
, , ; A1 + vfms(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000c40: { + // 0xf2000c50 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; A1 + vfma(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200c50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; A1 + vfms(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d00: { + // 0xf2000d10 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.F32
, , ; A1 + vmla(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.F32
, , ; A1 + vmls(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000d10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.F32 {
}, , ; A1 + vmul(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000d40: { + // 0xf2000d50 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; A1 + vmla(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; A1 + vmls(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000d50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMUL{}{}.F32 {}, , ; A1 + vmul(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e00: { + // 0xf2000e10 + switch (instr & 0x01300000) { + case 0x01000000: { + // 0xf3000e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = 
ExtractDRegister(instr, 5, 0); + // VACGE{}{}.F32 {
}, , ; A1 + vacge(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200e10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VACGT{}{}.F32 {
}, , ; A1 + vacgt(al, F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000e40: { + // 0xf2000e50 + switch (instr & 0x01300000) { + case 0x01000000: { + // 0xf3000e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGE{}{}.F32 {}, , ; A1 + vacge(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200e50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VACGT{}{}.F32 {}, , ; A1 + vacgt(al, F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f00: { + // 0xf2000f10 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRECPS{}{}.F32 {
}, , ; A1 + vrecps(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {
}, , ; A1 + vrsqrts(al, + F32, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAXNM{}.F32
, , ; A1 + vmaxnm(F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200f10 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMINNM{}.F32
, , ; A1 + vminnm(F32, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000f40: { + // 0xf2000f50 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf2000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRECPS{}{}.F32 {}, , ; A1 + vrecps(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x00200000: { + // 0xf2200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSQRTS{}{}.F32 {}, , ; A1 + vrsqrts(al, + F32, + QRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3000f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMAXNM{}.F32 , , ; A1 + vmaxnm(F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + case 0x01200000: { + // 0xf3200f50 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 
16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMINNM{}.F32 , , ; A1 + vminnm(F32, QRegister(rd), QRegister(rn), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00800000: { + // 0xf2800000 + switch (instr & 0x00300000) { + case 0x00300000: { + // 0xf2b00000 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2b00000 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf2b00000 + if (((instr & 0x800) == 0x800)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{}{}.8 {
}, , , # ; A1 + vext(al, + Untyped8, + DRegister(rd), + DRegister(rn), + DRegister(rm), + imm); + break; + } + case 0x00000040: { + // 0xf2b00040 + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm = (instr >> 8) & 0xf; + // VEXT{}{}.8 {}, , , # ; A1 + vext(al, + Untyped8, + QRegister(rd), + QRegister(rn), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x01000000: { + // 0xf3b00000 + switch (instr & 0x00000800) { + case 0x00000000: { + // 0xf3b00000 + switch (instr & 0x00030200) { + case 0x00000000: { + // 0xf3b00000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xf3b00000 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VREV64{}{}.
, ; A1 + vrev64(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b00040 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VREV64{}{}.
, ; A1 + vrev64(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b00080 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VREV32{}{}.
, ; A1 + vrev32(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xf3b000c0 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VREV32{}{}.
, ; A1 + vrev32(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b00100 + DataType dt = + Dt_size_1_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VREV16{}{}.
, ; A1 + vrev16(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf3b00140 + DataType dt = + Dt_size_1_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VREV16{}{}.
, ; A1 + vrev16(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf3b00400 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLS{}{}.
, ; A1 + vcls(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000440: { + // 0xf3b00440 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLS{}{}.
, ; A1 + vcls(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000480: { + // 0xf3b00480 + DataType dt = + Dt_size_4_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLZ{}{}.
, ; A1 + vclz(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xf3b004c0 + DataType dt = + Dt_size_4_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLZ{}{}.
, ; A1 + vclz(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000500: { + // 0xf3b00500 + if ((instr & 0x000c0000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCNT{}{}.8
, ; A1 + vcnt(al, + Untyped8, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000540: { + // 0xf3b00540 + if ((instr & 0x000c0000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCNT{}{}.8 , ; A1 + vcnt(al, + Untyped8, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000580: { + // 0xf3b00580 + if ((instr & 0x000c0000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMVN{}{}{.
}
, ; A1 + vmvn(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000005c0: { + // 0xf3b005c0 + if ((instr & 0x000c0000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMVN{}{}{.
} , ; A1 + vmvn(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000200: { + // 0xf3b00200 + switch (instr & 0x00000540) { + case 0x00000000: { + // 0xf3b00200 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADDL{}{}.
, ; A1 + vpaddl(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b00240 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VPADDL{}{}.
, ; A1 + vpaddl(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b00300 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00300 + UnimplementedA32("AESE", instr); + break; + } + case 0x00000080: { + // 0xf3b00380 + UnimplementedA32("AESMC", instr); + break; + } + } + break; + } + case 0x00000140: { + // 0xf3b00340 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00340 + UnimplementedA32("AESD", instr); + break; + } + case 0x00000080: { + // 0xf3b003c0 + UnimplementedA32("AESIMC", instr); + break; + } + } + break; + } + case 0x00000400: { + // 0xf3b00600 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VPADAL{}{}.
, ; A1 + vpadal(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000440: { + // 0xf3b00640 + DataType dt = + Dt_op_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 5) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VPADAL{}{}.
, ; A1 + vpadal(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000500: { + // 0xf3b00700 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00700 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQABS{}{}.
, ; A1 + vqabs(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b00780 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQNEG{}{}.
, ; A1 + vqneg(al, dt, DRegister(rd), DRegister(rm)); + break; + } + } + break; + } + case 0x00000540: { + // 0xf3b00740 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b00740 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQABS{}{}.
, ; A1 + vqabs(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b007c0 + DataType dt = + Dt_size_5_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQNEG{}{}.
, ; A1 + vqneg(al, dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00010000: { + // 0xf3b10000 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xf3b10000 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGT{}{}.
{
}, , #0 ; A1 + vcgt(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xf3b10040 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGT{}{}.
{}, , #0 ; A1 + vcgt(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000080: { + // 0xf3b10080 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCGE{}{}.
{
}, , #0 ; A1 + vcge(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xf3b100c0 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCGE{}{}.
{}, , #0 ; A1 + vcge(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000100: { + // 0xf3b10100 + DataType dt = + Dt_F_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCEQ{}{}.
{
}, , #0 ; A1 + vceq(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000140: { + // 0xf3b10140 + DataType dt = + Dt_F_size_2_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCEQ{}{}.
{}, , #0 ; A1 + vceq(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000180: { + // 0xf3b10180 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLE{}{}.
{
}, , #0 ; A1 + vcle(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x000001c0: { + // 0xf3b101c0 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLE{}{}.
{}, , #0 ; A1 + vcle(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + } + break; + } + case 0x00010200: { + // 0xf3b10200 + switch (instr & 0x000001c0) { + case 0x00000000: { + // 0xf3b10200 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCLT{}{}.
{
}, , #0 ; A1 + vclt(al, + dt, + DRegister(rd), + DRegister(rm), + UINT32_C(0)); + break; + } + case 0x00000040: { + // 0xf3b10240 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCLT{}{}.
{}, , #0 ; A1 + vclt(al, + dt, + QRegister(rd), + QRegister(rm), + UINT32_C(0)); + break; + } + case 0x000000c0: { + // 0xf3b102c0 + if ((instr & 0x000c0400) == 0x00080000) { + UnimplementedA32("SHA1H", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0xf3b10300 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABS{}{}.
, ; A1 + vabs(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf3b10340 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VABS{}{}.
, ; A1 + vabs(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000180: { + // 0xf3b10380 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNEG{}{}.
, ; A1 + vneg(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xf3b103c0 + DataType dt = + Dt_F_size_1_Decode(((instr >> 18) & 0x3) | + ((instr >> 8) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VNEG{}{}.
, ; A1 + vneg(al, dt, QRegister(rd), QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00020000: { + // 0xf3b20000 + switch (instr & 0x000005c0) { + case 0x00000000: { + // 0xf3b20000 + if ((instr & 0x000c0000) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSWP{}{}{.
}
, ; A1 + vswp(al, + kDataTypeValueNone, + DRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000040: { + // 0xf3b20040 + if ((instr & 0x000c0000) == 0x00000000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSWP{}{}{.
} , ; A1 + vswp(al, + kDataTypeValueNone, + QRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000080: { + // 0xf3b20080 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTRN{}{}.
, ; A1 + vtrn(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000000c0: { + // 0xf3b200c0 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VTRN{}{}.
, ; A1 + vtrn(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b20100 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VUZP{}{}.
, ; A1 + vuzp(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000140: { + // 0xf3b20140 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VUZP{}{}.
, ; A1 + vuzp(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000180: { + // 0xf3b20180 + DataType dt = + Dt_size_15_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VZIP{}{}.
, ; A1 + vzip(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000001c0: { + // 0xf3b201c0 + DataType dt = + Dt_size_7_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VZIP{}{}.
, ; A1 + vzip(al, dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000400: { + // 0xf3b20400 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTN{}.
, ; A1 + vrintn(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000440: { + // 0xf3b20440 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTN{}.
, ; A1 + vrintn(dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000480: { + // 0xf3b20480 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTX{}.
, ; A1 + vrintx(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000004c0: { + // 0xf3b204c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTX{}.
, ; A1 + vrintx(dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000500: { + // 0xf3b20500 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTA{}.
, ; A1 + vrinta(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000540: { + // 0xf3b20540 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTA{}.
, ; A1 + vrinta(dt, QRegister(rd), QRegister(rm)); + break; + } + case 0x00000580: { + // 0xf3b20580 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTZ{}.
, ; A1 + vrintz(al, dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x000005c0: { + // 0xf3b205c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTZ{}.
, ; A1 + vrintz(dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + case 0x00020200: { + // 0xf3b20200 + switch (instr & 0x00000580) { + case 0x00000000: { + // 0xf3b20200 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf3b20200 + DataType dt = + Dt_size_3_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VMOVN{}{}.
, ; A1 + vmovn(al, dt, DRegister(rd), QRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b20240 + DataType dt = + Dt_size_14_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQMOVUN{}{}.
, ; A1 + vqmovun(al, + dt, + DRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000080: { + // 0xf3b20280 + DataType dt = + Dt_op_size_3_Decode(((instr >> 18) & 0x3) | + ((instr >> 4) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VQMOVN{}{}.
, ; A1 + vqmovn(al, dt, DRegister(rd), QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3b20300 + if ((instr & 0x00000040) == 0x00000000) { + DataType dt = + Dt_size_17_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm = dt.GetSize(); + // VSHLL{}{}. , , # ; A2 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000180: { + // 0xf3b20380 + switch (instr & 0x000c0040) { + case 0x00080000: { + // 0xf3ba0380 + UnimplementedA32("SHA1SU1", instr); + break; + } + case 0x00080040: { + // 0xf3ba03c0 + UnimplementedA32("SHA256SU0", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xf3b20600 + if ((instr & 0x000c0040) == 0x00040000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVT{}{}.F16.F32
, ; A1 + vcvt(al, + F16, + F32, + DRegister(rd), + QRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000480: { + // 0xf3b20680 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf3b20680 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTM{}.
, ; A1 + vrintm(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b206c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTM{}.
, ; A1 + vrintm(dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + case 0x00000500: { + // 0xf3b20700 + if ((instr & 0x000c0040) == 0x00040000) { + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.F32.F16 , ; A1 + vcvt(al, + F32, + F16, + QRegister(rd), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000580: { + // 0xf3b20780 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf3b20780 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTP{}.
, ; A1 + vrintp(dt, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b207c0 + DataType dt = + Dt_size_16_Decode((instr >> 18) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRINTP{}.
, ; A1 + vrintp(dt, QRegister(rd), QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030000: { + // 0xf3b30000 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xf3b30000 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xf3bb0000 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTA{}.
.F32
, ; A1 + vcvta(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080100: { + // 0xf3bb0100 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTN{}.
.F32
, ; A1 + vcvtn(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000040: { + // 0xf3b30040 + switch (instr & 0x000c0100) { + case 0x00080000: { + // 0xf3bb0040 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTA{}.
.F32 , ; A1 + vcvta(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00080100: { + // 0xf3bb0140 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTN{}.
.F32 , ; A1 + vcvtn(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000400: { + // 0xf3b30400 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b30400 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRECPE{}{}.
, ; A1 + vrecpe(al, + dt, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b30480 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; A1 + vrsqrte(al, + dt, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xf3b30440 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf3b30440 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRECPE{}{}.
, ; A1 + vrecpe(al, + dt, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000080: { + // 0xf3b304c0 + DataType dt = Dt_F_size_4_Decode( + ((instr >> 18) & 0x3) | + ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSQRTE{}{}.
, ; A1 + vrsqrte(al, + dt, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + } + break; + } + case 0x00030200: { + // 0xf3b30200 + switch (instr & 0x000c0440) { + case 0x00080000: { + // 0xf3bb0200 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf3bb0200 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTP{}.
.F32
, ; A1 + vcvtp(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3bb0300 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTM{}.
.F32
, ; A1 + vcvtm(dt, + F32, + DRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00080040: { + // 0xf3bb0240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf3bb0240 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTP{}.
.F32 , ; A1 + vcvtp(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + case 0x00000100: { + // 0xf3bb0340 + DataType dt = + Dt_op_3_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVTM{}.
.F32 , ; A1 + vcvtm(dt, + F32, + QRegister(rd), + QRegister(rm)); + break; + } + } + break; + } + case 0x00080400: { + // 0xf3bb0600 + DataType dt1 = + Dt_op_1_Decode1((instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = + Dt_op_1_Decode2((instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.
.
, ; A1 + vcvt(al, + dt1, + dt2, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00080440: { + // 0xf3bb0640 + DataType dt1 = + Dt_op_1_Decode1((instr >> 7) & 0x3); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = + Dt_op_1_Decode2((instr >> 7) & 0x3); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VCVT{}{}.
.
, ; A1 + vcvt(al, + dt1, + dt2, + QRegister(rd), + QRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf3b00800 + switch (instr & 0x00000440) { + case 0x00000000: { + // 0xf3b00800 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned first = ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTBL{}{}.8
, , ; A1 + vtbl(al, + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000040: { + // 0xf3b00840 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned first = ExtractDRegister(instr, 7, 16); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0x3) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + case 0x2: + length = 3; + break; + case 0x3: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rm = ExtractDRegister(instr, 5, 0); + // VTBX{}{}.8
, , ; A1 + vtbx(al, + Untyped8, + DRegister(rd), + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + DRegister(rm)); + break; + } + case 0x00000400: { + // 0xf3b00c00 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & 0xf, &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
, ; A1 + vdup(al, + dt, + DRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000440: { + // 0xf3b00c40 + if ((instr & 0x00000380) == 0x00000000) { + unsigned lane; + DataType dt = + Dt_imm4_1_Decode((instr >> 16) & 0xf, &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDUP{}{}.
, ; A1 + vdup(al, + dt, + QRegister(rd), + DRegisterLane(rm, lane)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000c40) { + case 0x00000000: { + // 0xf2800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800000 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDL{}{}.
, , ; A1 + vaddl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2800100 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADDW{}{}.
{}, , ; A1 + vaddw(al, + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xf2800200 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBL{}{}.
, , ; A1 + vsubl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2800300 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUBW{}{}.
{}, , ; A1 + vsubw(al, + dt, + QRegister(rd), + QRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000040: { + // 0xf2800040 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xf2800040 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLA{}{}.
, , ; A1 NOLINT(whitespace/line_length) + vmla(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000000: { + // 0xf3800040 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLA{}{}. , , ; A1 NOLINT(whitespace/line_length) + vmla(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800240 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800240 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLAL{}{}. 
, , ; A1 NOLINT(whitespace/line_length) + vmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xf2800340 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLAL{}{}.
, , [] ; A2 NOLINT(whitespace/line_length) + vqdmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xf2800400 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800400 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VADDHN{}{}.
, , ; A1 + vaddhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3800400 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRADDHN{}{}.
, , ; A1 + vraddhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800500 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABAL{}{}.
, , ; A1 + vabal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000200: { + // 0xf2800600 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VSUBHN{}{}.
, , ; A1 + vsubhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + case 0x01000000: { + // 0xf3800600 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_3_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + // VRSUBHN{}{}.
, , ; A1 + vrsubhn(al, + dt, + DRegister(rd), + QRegister(rn), + QRegister(rm)); + break; + } + } + break; + } + case 0x00000300: { + // 0xf2800700 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_1_Decode(((instr >> 20) & 0x3) | + ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABDL{}{}.
, , ; A1 + vabdl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + } + break; + } + case 0x00000440: { + // 0xf2800440 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xf2800440 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLS{}{}.
, , ; A1 NOLINT(whitespace/line_length) + vmls(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000000: { + // 0xf3800440 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_9_Decode((instr >> 20) & 0x3, + (instr >> 8) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLS{}{}. , , ; A1 NOLINT(whitespace/line_length) + vmls(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800640 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800640 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_11_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VMLSL{}{}. 
, , ; A1 NOLINT(whitespace/line_length) + vmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xf2800740 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VQDMLSL{}{}.
, , [] ; A2 NOLINT(whitespace/line_length) + vqdmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf2800800 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800800 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLAL{}{}. , , ; A1 + vmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2800900 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLAL{}{}.
, , ; A1 + vqdmlal(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000200: { + // 0xf2800a00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_12_Decode((instr >> 20) & 0x3, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLSL{}{}. , , ; A1 + vmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000300: { + // 0xf2800b00 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMLSL{}{}.
, , ; A1 + vqdmlsl(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + case 0x00000840: { + // 0xf2800840 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0xf2800840 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
{
}, , [] ; A1 NOLINT(whitespace/line_length) + vmul(al, + dt, + DRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x01000000: { + // 0xf3800840 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_F_size_3_Decode( + ((instr >> 20) & 0x3) | ((instr >> 6) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(I16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMUL{}{}.
{}, , [] ; A1 NOLINT(whitespace/line_length) + vmul(al, + dt, + QRegister(rd), + QRegister(rn), + DRegister(vm), + index); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800a40 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800a40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_size_2_Decode( + ((instr >> 20) & 0x3) | ((instr >> 22) & 0x4)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + uint32_t mvm = + (instr & 0xf) | ((instr >> 1) & 0x10); + uint32_t shift = 4; + if (dt.Is(S16) || dt.Is(U16)) { + shift = 3; + } + uint32_t vm = mvm & ((1 << shift) - 1); + uint32_t index = mvm >> shift; + // VMULL{}{}.
, , [] ; A1 NOLINT(whitespace/line_length) + vmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(vm), + index); + break; + } + case 0x00000100: { + // 0xf2800b40 + if ((instr & 0x01000000) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = ExtractDRegisterAndLane(instr, + dt, + 5, + 0, + &lane); + // VQDMULL{}{}.
, , ; A2 + vqdmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x00000c00: { + // 0xf2800c00 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800c00 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_op_U_size_1_Decode( + ((instr >> 20) & 0x3) | ((instr >> 22) & 0x4) | + ((instr >> 6) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMULL{}{}.
, , ; A1 + vmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000100: { + // 0xf2800d00 + if ((instr & 0x01000200) == 0x00000000) { + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VQDMULL{}{}.
, , ; A1 + vqdmull(al, + dt, + QRegister(rd), + DRegister(rn), + DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + case 0x00000c40: { + // 0xf2800c40 + switch (instr & 0x01000300) { + case 0x00000000: { + // 0xf2800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQDMULH{}{}.
{
}, , ; A2 + vqdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x00000100: { + // 0xf2800d40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQRDMULH{}{}.
{
}, , ; A2 + vqrdmulh(al, + dt, + DRegister(rd), + DRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000000: { + // 0xf3800c40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQDMULH{}{}.
{}, , ; A2 + vqdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + case 0x01000100: { + // 0xf3800d40 + if (((instr & 0x300000) == 0x300000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_13_Decode((instr >> 20) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rn = ExtractQRegister(instr, 7, 16); + int lane; + unsigned rm = + ExtractDRegisterAndLane(instr, dt, 5, 0, &lane); + // VQRDMULH{}{}.
{}, , ; A2 + vqrdmulh(al, + dt, + QRegister(rd), + QRegister(rn), + DRegisterLane(rm, lane)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + case 0x00800010: { + // 0xf2800010 + switch (instr & 0x00000040) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800010 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800030 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, DRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800110 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800110 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800130 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800010 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSHR{}{}. {
}, , # ; A1 NOLINT(whitespace/line_length) + vshr(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000100: { + // 0xf2800110 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRA{}{}. {
}, , # ; A1 NOLINT(whitespace/line_length) + vsra(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000200: { + // 0xf2800210 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSHR{}{}. {
}, , # ; A1 NOLINT(whitespace/line_length) + vrshr(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800310 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSRA{}{}. {
}, , # ; A1 NOLINT(whitespace/line_length) + vrsra(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xf2800410 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800410 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800410 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800430 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, DRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800510 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800510 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800530 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + DOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, DRegister(rd), DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800410 + if ((instr & 0x01000000) == 0x01000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
{
}, , # ; A1 + vsri(al, dt, DRegister(rd), DRegister(rm), imm); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0xf2800510 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800510 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_3_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSHL{}{}.I {
}, , # ; A1 NOLINT(whitespace/line_length) + vshl(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x01000000: { + // 0xf3800510 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_4_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{}{}.
{
}, , # ; A1 + vsli(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800610 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{}{}. {
}, , # ; A1 NOLINT(whitespace/line_length) + vqshlu(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800710 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{}{}. {
}, , # ; A1 NOLINT(whitespace/line_length) + vqshl(al, dt, DRegister(rd), DRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf2800810 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800830 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, DRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800910 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800910 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xf2800930 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, + dt, + DRegister(rd), + DRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + case 0x00180000: { + // 0xf2980810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2980810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3980810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2980910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00280000: { + // 0xf2a80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2a80810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2a80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3a80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2a80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2a80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00300000: { + // 0xf2b00810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2b00810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2b00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3b00810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2b00910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2b00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00380000: { + // 0xf2b80810 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2b80810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2b80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3b80810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2b80910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2b80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800810 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3800810 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800910 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x00000200: { + // 0xf2800a10 + switch (instr & 0x00070000) { + case 0x00000000: { + // 0xf2800a10 + switch (instr & 0x003f0000) { + case 0x00080000: { + // 0xf2880a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == 0x180000) || + ((instr & 0x380000) == 0x280000) || + ((instr & 0x380000) == 0x300000) || + ((instr & 0x380000) == 0x380000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 21) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; A1 + vmovl(al, + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00090000: { + // 0xf2890a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000a0000: { + // 0xf28a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000b0000: { + // 0xf28b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000c0000: { + // 0xf28c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000d0000: { + // 0xf28d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000e0000: { + // 0xf28e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x000f0000: { + // 0xf28f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00100000: { + // 0xf2900a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == 0x180000) || + ((instr & 0x380000) == 0x280000) || + ((instr & 0x380000) == 0x300000) || + ((instr & 0x380000) == 0x380000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 21) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; A1 + vmovl(al, + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00110000: { + // 0xf2910a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00120000: { + // 0xf2920a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00130000: { + // 0xf2930a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00140000: { + // 0xf2940a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00150000: { + // 0xf2950a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00160000: { + // 0xf2960a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00170000: { + // 0xf2970a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00180000: { + // 0xf2980a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00190000: { + // 0xf2990a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001a0000: { + // 0xf29a0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001b0000: { + // 0xf29b0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001c0000: { + // 0xf29c0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001d0000: { + // 0xf29d0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001e0000: { + // 0xf29e0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x001f0000: { + // 0xf29f0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00200000: { + // 0xf2a00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x380000) == 0x180000) || + ((instr & 0x380000) == 0x280000) || + ((instr & 0x380000) == 0x300000) || + ((instr & 0x380000) == 0x380000)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_U_imm3H_1_Decode( + ((instr >> 19) & 0x7) | + ((instr >> 21) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + // VMOVL{}{}.
, ; A1 + vmovl(al, + dt, + QRegister(rd), + DRegister(rm)); + break; + } + case 0x00210000: { + // 0xf2a10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00220000: { + // 0xf2a20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00230000: { + // 0xf2a30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00240000: { + // 0xf2a40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00250000: { + // 0xf2a50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00260000: { + // 0xf2a60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00270000: { + // 0xf2a70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00280000: { + // 0xf2a80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00290000: { + // 0xf2a90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002a0000: { + // 0xf2aa0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002b0000: { + // 0xf2ab0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002c0000: { + // 0xf2ac0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002d0000: { + // 0xf2ad0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002e0000: { + // 0xf2ae0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x002f0000: { + // 0xf2af0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00300000: { + // 0xf2b00a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00310000: { + // 0xf2b10a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00320000: { + // 0xf2b20a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00330000: { + // 0xf2b30a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00340000: { + // 0xf2b40a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00350000: { + // 0xf2b50a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00360000: { + // 0xf2b60a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00370000: { + // 0xf2b70a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00380000: { + // 0xf2b80a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x00390000: { + // 0xf2b90a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003a0000: { + // 0xf2ba0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003b0000: { + // 0xf2bb0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003c0000: { + // 0xf2bc0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003d0000: { + // 0xf2bd0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003e0000: { + // 0xf2be0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + case 0x003f0000: { + // 0xf2bf0a10 + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & + 0x7, + (instr >> 24) & + 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = + ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. , , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if (((instr & 0x380000) == 0x0) || + ((instr & 0x3f0000) == 0x80000) || + ((instr & 0x3f0000) == 0x100000) || + ((instr & 0x3f0000) == 0x200000)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_4_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = imm6 - dt.GetSize(); + // VSHLL{}{}. 
, , # ; A1 NOLINT(whitespace/line_length) + vshll(al, + dt, + QRegister(rd), + DRegister(rm), + imm); + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xf2800c10 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800c10 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xf2800c10 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xf2800c10 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000300: { + // 0xf2800f10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800c30 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000200: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000220: { + // 0xf2800e30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000400: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000420: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000600: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000620: { + // 0xf2800e30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000800: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000820: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000a00: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000a20: { + // 0xf2800e30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000c00: { + // 0xf2800c10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000c20: { + // 0xf2800c30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000d00: { + // 0xf2800d10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000d20: { + // 0xf2800d30 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, DRegister(rd), imm); + break; + } + case 0x00000e00: { + // 0xf2800e10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000e20: { + // 0xf2800e30 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + case 0x00000f00: { + // 0xf2800f10 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, DRegister(rd), imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + DOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, DRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if ((instr & 0x00000200) == 0x00000200) { + if (((instr & 0x200000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt1 = Dt_op_U_1_Decode1( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = Dt_op_U_1_Decode2( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + uint32_t fbits = 64 - ((instr >> 16) & 0x3f); + // VCVT{}{}.
.
, , # ; A1 NOLINT(whitespace/line_length) + vcvt(al, + dt1, + dt2, + DRegister(rd), + DRegister(rm), + fbits); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00000040: { + // 0xf2800050 + switch (instr & 0x00000c00) { + case 0x00000000: { + // 0xf2800050 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800050 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800050 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800070 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, QRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800150 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800150 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800170 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800050 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSHR{}{}. {}, , # ; A1 NOLINT(whitespace/line_length) + vshr(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000100: { + // 0xf2800150 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRA{}{}. 
{}, , # ; A1 NOLINT(whitespace/line_length) + vsra(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000200: { + // 0xf2800250 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSHR{}{}. {}, , # ; A1 NOLINT(whitespace/line_length) + vrshr(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800350 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VRSRA{}{}. 
{}, , # ; A1 NOLINT(whitespace/line_length) + vrsra(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000400: { + // 0xf2800450 + switch (instr & 0x00380080) { + case 0x00000000: { + // 0xf2800450 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800450 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800470 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, QRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800550 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800550 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800570 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + QOperand imm = ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, dt, QRegister(rd), QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800450 + if ((instr & 0x01000000) == 0x01000000) { + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_L_imm6_4_Decode( + ((instr >> 19) & 0x7) | ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + (dt.IsSize(64) ? 64 : (dt.GetSize() * 2)) - + imm6; + // VSRI{}{}.
{}, , # ; A1 + vsri(al, dt, QRegister(rd), QRegister(rm), imm); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0xf2800550 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_3_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSHL{}{}.I {}, , # ; A1 NOLINT(whitespace/line_length) + vshl(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x01000000: { + // 0xf3800550 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_4_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VSLI{}{}.
{}, , # ; A1 + vsli(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + } + break; + } + case 0x00000200: { + // 0xf2800650 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_2_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHLU{}{}. {}, , # ; A1 NOLINT(whitespace/line_length) + vqshlu(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + case 0x00000300: { + // 0xf2800750 + if (((instr & 0x380080) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_L_imm6_1_Decode(((instr >> 19) & 0x7) | + ((instr >> 4) & 0x8), + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = + imm6 - (dt.IsSize(64) ? 0 : dt.GetSize()); + // VQSHL{}{}. 
{}, , # ; A1 NOLINT(whitespace/line_length) + vqshl(al, dt, QRegister(rd), QRegister(rm), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000800: { + // 0xf2800850 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x00380000) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x00000200) { + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800870 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 + vmvn(al, dt, QRegister(rd), imm); + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800950 + switch (instr & 0x00000020) { + case 0x00000000: { + // 0xf2800950 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVorr::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVorr::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VORR{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vorr(al, + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + case 0x00000020: { + // 0xf2800970 + if (((instr & 0x100) == 0x0) || + ((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVbic::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVbic::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VBIC{}{}.
{}, , # ; A1 NOLINT(whitespace/line_length) + vbic(al, + dt, + QRegister(rd), + QRegister(rd), + imm); + break; + } + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf2800850 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0xf2800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_3_Decode((instr >> 19) & 0x7); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VRSHRN{}{}.I
, , # ; A1 NOLINT(whitespace/line_length) + vrshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + case 0x01000000: { + // 0xf3800850 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_2_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRUN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqrshrun(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + } + break; + } + case 0x00000100: { + // 0xf2800950 + if (((instr & 0x380000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_imm6_1_Decode((instr >> 19) & 0x7, + (instr >> 24) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t imm6 = (instr >> 16) & 0x3f; + uint32_t imm = dt.GetSize() - imm6; + // VQRSHRN{}{}.
, , # ; A1 NOLINT(whitespace/line_length) + vqrshrn(al, + dt, + DRegister(rd), + QRegister(rm), + imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000c00: { + // 0xf2800c50 + switch (instr & 0x00000080) { + case 0x00000000: { + // 0xf2800c50 + switch (instr & 0x00200000) { + case 0x00000000: { + // 0xf2800c50 + switch (instr & 0x00180000) { + case 0x00000000: { + // 0xf2800c50 + switch (instr & 0x00000300) { + case 0x00000200: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000300: { + // 0xf2800f50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + default: { + switch (instr & 0x00000020) { + case 0x00000020: { + // 0xf2800c70 + switch (instr & 0x00000f20) { + case 0x00000000: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000020: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000200: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000220: { + // 0xf2800e70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000400: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000420: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000600: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000620: { + // 0xf2800e70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000800: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000820: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000a00: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000a20: { + // 0xf2800e70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000c00: { + // 0xf2800c50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000c20: { + // 0xf2800c70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000d00: { + // 0xf2800d50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000d20: { + // 0xf2800d70 + if (((instr & 0xd00) == 0x100) || + ((instr & 0xd00) == 0x500) || + ((instr & 0xd00) == 0x900) || + ((instr & 0xe00) == 0xe00)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = (instr >> 8) & 0xf; + DataType dt = + ImmediateVmvn::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmvn::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMVN{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmvn(al, dt, QRegister(rd), imm); + break; + } + case 0x00000e00: { + // 0xf2800e50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000e20: { + // 0xf2800e70 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + case 0x00000f00: { + // 0xf2800f50 + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = + ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 NOLINT(whitespace/line_length) + vmov(al, dt, QRegister(rd), imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if (((instr & 0x920) == 0x100) || + ((instr & 0x520) == 0x100) || + ((instr & 0x820) == 0x20) || + ((instr & 0x420) == 0x20) || + ((instr & 0x220) == 0x20) || + ((instr & 0x120) == 0x120)) { + UnallocatedA32(instr); + return; + } + unsigned cmode = ((instr >> 8) & 0xf) | + ((instr >> 1) & 0x10); + DataType dt = + ImmediateVmov::DecodeDt(cmode); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = + ExtractQRegister(instr, 22, 12); + QOperand imm = + ImmediateVmov::DecodeImmediate( + cmode, + (instr & 0xf) | + ((instr >> 12) & 0x70) | + ((instr >> 17) & 0x80)); + // VMOV{}{}.
, # ; A1 + vmov(al, dt, QRegister(rd), imm); + break; + } + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + if ((instr & 0x00000200) == 0x00000200) { + if (((instr & 0x200000) == 0x0)) { + UnallocatedA32(instr); + return; + } + DataType dt1 = Dt_op_U_1_Decode1( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt1.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DataType dt2 = Dt_op_U_1_Decode2( + ((instr >> 24) & 0x1) | ((instr >> 7) & 0x2)); + if (dt2.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 12) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 22, 12); + if ((instr & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rm = ExtractQRegister(instr, 5, 0); + uint32_t fbits = 64 - ((instr >> 16) & 0x3f); + // VCVT{}{}.
.
, , # ; A1 NOLINT(whitespace/line_length) + vcvt(al, + dt1, + dt2, + QRegister(rd), + QRegister(rm), + fbits); + } else { + UnallocatedA32(instr); + } + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x04000000: { + // 0xf4000000 + switch (instr & 0x01300000) { + case 0x00000000: { + // 0xf4000000 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0xf4000000 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf400000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf400000d + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf400000d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000100: { + // 0xf400010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000200: { + // 0xf400020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000300: { + // 0xf400030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000400: { + // 0xf400040d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000500: { + // 0xf400050d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000600: { + // 0xf400060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000700: { + // 0xf400070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000800: { + // 0xf400080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000900: { + // 0xf400090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf400000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf400000d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; A1 + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf400010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; A1 + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf400020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf400030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf400040d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}] ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf400050d + if (((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [{:}] ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf400060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf400070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf400080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf400090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf4000a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf4000000 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf4000100 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf4000200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf4000300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf4000400 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf4000500 + if (((instr & 0xd) == 0xd) || + ((instr & 0x20) == 0x20)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf4000600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf4000700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf4000800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf4000900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4000a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_5_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00800000: { + // 0xf4800000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf4800000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4800c00 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf480000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VST1{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf4800100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4800d00 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf480010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST2{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf4800200 + switch (instr & 0x00000c30) { + case 0x00000010: { + // 0xf4800210 + UnallocatedA32(instr); + break; + } + case 0x00000030: { + // 0xf4800230 + UnallocatedA32(instr); + break; + } + case 0x00000410: { + // 0xf4800610 + UnallocatedA32(instr); + break; + } + case 0x00000430: { + // 0xf4800630 + UnallocatedA32(instr); + break; + } + case 0x00000810: { + // 0xf4800a10 + UnallocatedA32(instr); + break; + } + case 0x00000820: { + // 0xf4800a20 + UnallocatedA32(instr); + break; + } + case 0x00000830: { + // 0xf4800a30 + UnallocatedA32(instr); + break; + } + case 0x00000c00: { + // 0xf4800e00 + UnallocatedA32(instr); + break; + } + case 0x00000c10: { + // 0xf4800e10 + UnallocatedA32(instr); + break; + } + case 0x00000c20: { + // 0xf4800e20 + UnallocatedA32(instr); + break; + } + case 0x00000c30: { + // 0xf4800e30 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480020d + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == 
kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, []! ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf480020f + if (((instr & 0xc00) == 0xc00) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST3{}{}.
, [] ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd) || + ((instr & 0x810) == 0x10) || + ((instr & 0xc30) == 0x810) || + ((instr & 0xc30) == 0x820) || + ((instr & 0xc30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VST3{}{}.
, [], # ; A1 + vst3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf4800300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4800f00 + UnallocatedA32(instr); + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf480030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf480030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf480030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VST4{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VST4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vst4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x00100000: { + // 0xf4100000 + switch (instr & 0x00400000) { + case 0x00400000: { + // 0xf4500000 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xf45f0000 + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xfff; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kA32PcDelta); + // PLI{}{}
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000100: { + // 0xf420010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000200: { + // 0xf420020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000300: { + // 0xf420030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000400: { + // 0xf420040d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000500: { + // 0xf420050d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000600: { + // 0xf420060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000700: { + // 0xf420070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000800: { + // 0xf420080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000900: { + // 0xf420090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000002: { + // 0xf420000f + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf420000d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000100: { + // 0xf420010d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000200: { + // 0xf420020d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000300: { + // 0xf420030d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000400: { + // 0xf420040d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000500: { + // 0xf420050d + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [{:}] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000600: { + // 0xf420060d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000700: { + // 0xf420070d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000800: { + // 0xf420080d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000900: { + // 0xf420090d + if (((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + case 0x00000a00: { + // 0xf4200a0d + if (((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + default: { + switch (instr & 0x00000f00) { + case 0x00000000: { + // 0xf4200000 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000100: { + // 0xf4200100 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_4_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000200: { + // 0xf4200200 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000300: { + // 0xf4200300 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000400: { + // 0xf4200400 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000500: { + // 0xf4200500 + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_3_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x4: + length = 3; + spacing = kSingle; + break; + case 0x5: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000600: { + // 0xf4200600 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000700: { + // 0xf4200700 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000800: { + // 0xf4200800 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000900: { + // 0xf4200900 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe30) == 0x830)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_2_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x8: + length = 2; + spacing = kSingle; + break; + case 0x9: + length = 2; + spacing = kDouble; + break; + case 0x3: + length = 4; + spacing = kSingle; + break; + } + unsigned last = + first + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + case 0x00000a00: { + // 0xf4200a00 + if (((instr & 0xd) == 0xd) || + ((instr & 0xe20) == 0x620) || + ((instr & 0xf30) == 0xa30)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_6_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_align_1_Decode((instr >> 4) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 8) & 0xf) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x7: + length = 1; + break; + case 0xa: + length = 2; + break; + case 0x6: + length = 3; + break; + case 0x2: + length = 4; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kMultipleLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00800000: { + // 0xf4a00000 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0xf4a00000 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00c00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00c0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00c0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00c0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_1_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing = kSingle; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 1; + break; + case 0x1: + length = 2; + break; + } + unsigned last = first + length - 1; + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0000d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0000d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0000f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + // VLD1{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_1_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 1; + unsigned last = first + length - 1; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD1{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld1(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000100: { + // 0xf4a00100 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00d00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00d0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00d0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00d0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_2_Decode((instr >> 4) & 0x1, dt); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 2; + spacing = kSingle; + break; + case 0x1: + length = 2; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0010d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0010d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0010f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD2{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_2_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 2; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD2{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld2(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000200: { + // 0xf4a00200 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00e00 + switch (instr & 0x00000010) { + case 0x00000000: { + // 0xf4a00e00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00e0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00e0d + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00e0f + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * + (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 3; + spacing = kSingle; + break; + case 0x1: + length = 3; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; A1 NOLINT(whitespace/line_length) + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0020d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0020d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, []! ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0020f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD3{}{}.
, [] ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeon decode_neon = + Index_1_Decode((instr >> 4) & 0xf, dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 3; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + Sign sign(plus); + unsigned rm = instr & 0xf; + // VLD3{}{}.
, [], # ; A1 + vld3(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + case 0x00000300: { + // 0xf4a00300 + switch (instr & 0x00000c00) { + case 0x00000c00: { + // 0xf4a00f00 + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a00f0d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a00f0d + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a00f0f + DataType dt = + Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_8_Decode((instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + Alignment align = + Align_a_3_Decode((instr >> 4) & 0x1, + dt, + (instr >> 6) & 0x3); + if (dt.Is(kDataTypeValueInvalid) || + align.Is(kBadAlignment)) { + UnallocatedA32(instr); + return; + } + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length; + SpacingType spacing; + switch ((instr >> 5) & 0x1) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case 0x0: + length = 4; + spacing = kSingle; + break; + case 0x1: + length = 4; + spacing = kDouble; + break; + } + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + TransferType transfer = kAllLanes; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + transfer), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + default: { + switch (instr & 0x0000000d) { + case 0x0000000d: { + // 0xf4a0030d + switch (instr & 0x00000002) { + case 0x00000000: { + // 0xf4a0030d + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}]! ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + PostIndex)); + break; + } + case 0x00000002: { + // 0xf4a0030f + if (((instr & 0xc00) == 0xc00)) { + UnallocatedA32(instr); + return; + } + DataType dt = + Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & + 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = + ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + // VLD4{}{}.
, [{:}] ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Offset)); + break; + } + } + break; + } + default: { + if (((instr & 0xc00) == 0xc00) || + ((instr & 0xd) == 0xd)) { + UnallocatedA32(instr); + return; + } + DataType dt = Dt_size_7_Decode((instr >> 10) & 0x3); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + DecodeNeonAndAlign decode_neon = + Align_index_align_3_Decode((instr >> 4) & 0xf, + dt); + if (!decode_neon.IsValid()) { + UnallocatedA32(instr); + return; + } + Alignment align = decode_neon.GetAlign(); + int lane = decode_neon.GetLane(); + SpacingType spacing = decode_neon.GetSpacing(); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned length = 4; + unsigned last = + first + + (length - 1) * (spacing == kSingle ? 1 : 2); + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // VLD4{}{}.
, [{:}], ; A1 NOLINT(whitespace/line_length) + vld4(al, + dt, + NeonRegisterList(DRegister(first), + DRegister(last), + spacing, + lane), + AlignedMemOperand(Register(rn), + align, + Register(rm), + PostIndex)); + break; + } + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x01100000: { + // 0xf5100000 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0xf51f0000 + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xfff; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kA32PcDelta); + // PLD{}{}
, , ; A1 + vseleq(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00100a00: { + // 0xfe100a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELVS.F32 , , ; A1 + vselvs(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + break; + } + case 0x00100b00: { + // 0xfe100b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELVS.F64
, , ; A1 + vselvs(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00200a00: { + // 0xfe200a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELGE.F32 , , ; A1 + vselge(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + break; + } + case 0x00200b00: { + // 0xfe200b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELGE.F64
, , ; A1 + vselge(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00300a00: { + // 0xfe300a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSELGT.F32 , , ; A1 + vselgt(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + break; + } + case 0x00300b00: { + // 0xfe300b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSELGT.F64
, , ; A1 + vselgt(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00800a00: { + // 0xfe800a00 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMAXNM{}.F32 , , ; A2 + vmaxnm(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + break; + } + case 0x00800a40: { + // 0xfe800a40 + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMINNM{}.F32 , , ; A2 + vminnm(F32, SRegister(rd), SRegister(rn), SRegister(rm)); + break; + } + case 0x00800b00: { + // 0xfe800b00 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMAXNM{}.F64
, , ; A2 + vmaxnm(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00800b40: { + // 0xfe800b40 + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMINNM{}.F64
, , ; A2 + vminnm(F64, DRegister(rd), DRegister(rn), DRegister(rm)); + break; + } + case 0x00b00a40: { + // 0xfeb00a40 + switch (instr & 0x000f0000) { + case 0x00080000: { + // 0xfeb80a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTA{}.F32 , ; A1 + vrinta(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00090000: { + // 0xfeb90a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTN{}.F32 , ; A1 + vrintn(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0xfeba0a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTP{}.F32 , ; A1 + vrintp(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000b0000: { + // 0xfebb0a40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTM{}.F32 , ; A1 + vrintm(F32, SRegister(rd), SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000c0000: { + // 0xfebc0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTA{}.
.F32 , ; A1 + vcvta(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000d0000: { + // 0xfebd0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTN{}.
.F32 , ; A1 + vcvtn(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000e0000: { + // 0xfebe0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTP{}.
.F32 , ; A1 + vcvtp(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x000f0000: { + // 0xfebf0a40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTM{}.
.F32 , ; A1 + vcvtm(dt, F32, SRegister(rd), SRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00b00b40: { + // 0xfeb00b40 + switch (instr & 0x000f0000) { + case 0x00080000: { + // 0xfeb80b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTA{}.F64
, ; A1 + vrinta(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00090000: { + // 0xfeb90b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTN{}.F64
, ; A1 + vrintn(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0xfeba0b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTP{}.F64
, ; A1 + vrintp(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000b0000: { + // 0xfebb0b40 + if ((instr & 0x00000080) == 0x00000000) { + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTM{}.F64
, ; A1 + vrintm(F64, DRegister(rd), DRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000c0000: { + // 0xfebc0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTA{}.
.F64 , ; A1 + vcvta(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000d0000: { + // 0xfebd0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTN{}.
.F64 , ; A1 + vcvtn(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000e0000: { + // 0xfebe0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTP{}.
.F64 , ; A1 + vcvtp(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + case 0x000f0000: { + // 0xfebf0b40 + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTM{}.
.F64 , ; A1 + vcvtm(dt, F64, SRegister(rd), DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + } else { + switch (instr & 0x0e000000) { + case 0x00000000: { + // 0x00000000 + switch (instr & 0x00100010) { + case 0x00000000: { + // 0x00000000 + switch (instr & 0x01a00000) { + case 0x00000000: { + // 0x00000000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x00000000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00000060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // AND{}{} {}, , , RRX ; A1 + and_(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // AND{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + and_(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + case 0x00400000: { + // 0x00400000 + switch (instr & 0x000f0000) { + case 0x000d0000: { + // 0x004d0000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x004d0060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // SUB{}{} {}, SP, , RRX ; A1 + sub(condition, + Best, + 
Register(rd), + sp, + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & + 0x3, + (instr >> 7) & + 0x1f); + // SUB{}{} {}, SP, {, # } ; A1 NOLINT(whitespace/line_length) + sub(condition, + Best, + Register(rd), + sp, + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + default: { + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00400060 + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xd0000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // SUB{}{} {}, , , RRX ; A1 + sub(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xd0000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & + 0x3, + (instr >> 7) & + 0x1f); + // SUB{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + sub(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x00200000: { + // 0x00200000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x00200000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00200060 + if (((instr & 0xf0000000) == 0xf0000000)) { + 
UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // EOR{}{} {}, , , RRX ; A1 + eor(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // EOR{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + eor(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + case 0x00400000: { + // 0x00600000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00600060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // RSB{}{} {}, , , RRX ; A1 + rsb(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // RSB{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + rsb(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 
0x00800000: { + // 0x00800000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x00800000 + switch (instr & 0x000f0000) { + case 0x000d0000: { + // 0x008d0000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x008d0060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // ADD{}{} {}, SP, , RRX ; A1 + add(condition, + Best, + Register(rd), + sp, + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & + 0x3, + (instr >> 7) & + 0x1f); + // ADD{}{} {}, SP, {, # } ; A1 NOLINT(whitespace/line_length) + add(condition, + Best, + Register(rd), + sp, + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + default: { + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00800060 + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xd0000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // ADD{}{} {}, , , RRX ; A1 + add(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xd0000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & + 0x3, + (instr >> 7) & + 0x1f); + // ADD{}{} {}, 
, {, # } ; A1 NOLINT(whitespace/line_length) + add(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x00400000: { + // 0x00c00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00c00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // SBC{}{} {}, , , RRX ; A1 + sbc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // SBC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + sbc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x00a00000: { + // 0x00a00000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x00a00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00a00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // ADC{}{} {}, , , RRX ; A1 + adc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition 
condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // ADC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + adc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + case 0x00400000: { + // 0x00e00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x00e00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // RSC{}{} {}, , , RRX ; A1 + rsc(condition, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // RSC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + rsc(condition, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x01000000: { + // 0x01000000 + switch (instr & 0x000000e0) { + case 0x00000000: { + // 0x01000000 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0x01000000 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned spec_reg = (instr >> 22) & 0x1; + // MRS{}{} , ; A1 + mrs(condition, Register(rd), SpecialRegister(spec_reg)); + if (((instr & 
0xfbf0fff) != 0x10f0000)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01000200 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MRS", instr); + break; + } + } + break; + } + case 0x00000040: { + // 0x01000040 + switch (instr & 0x00400200) { + case 0x00000000: { + // 0x01000040 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32B{} , , ; A1 + crc32b(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1000040)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01000240 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32CB{} , , ; A1 + crc32cb(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1000240)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400000: { + // 0x01400040 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32W{} , , ; A1 + crc32w(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1400040)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400200: { + // 0x01400240 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32CW{} , , ; A1 + crc32cw(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1400240)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00000080: { + // 0x01000080 + switch (instr & 0x00400000) { + case 
0x00000000: { + // 0x01000080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLABB{}{} , , , ; A1 + smlabb(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x01400080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALBB{}{} , , , ; A1 + smlalbb(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x000000a0: { + // 0x010000a0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x010000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLATB{}{} , , , ; A1 + smlatb(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x014000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALTB{}{} , , , ; A1 + smlaltb(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x000000c0: { + // 0x010000c0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x010000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + 
UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLABT{}{} , , , ; A1 + smlabt(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x014000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALBT{}{} , , , ; A1 + smlalbt(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x000000e0: { + // 0x010000e0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x010000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLATT{}{} , , , ; A1 + smlatt(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x014000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLALTT{}{} , , , ; A1 + smlaltt(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01200000: { + // 0x01200000 + switch (instr & 0x000000e0) { + case 0x00000000: { + // 0x01200000 + switch (instr & 0x00000200) { + case 0x00000000: { + // 0x01200000 + if 
(((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned spec_reg = + ((instr >> 16) & 0xf) | ((instr >> 18) & 0x10); + unsigned rn = instr & 0xf; + // MSR{}{} , ; A1 + msr(condition, + MaskedSpecialRegister(spec_reg), + Register(rn)); + if (((instr & 0xfb0fff0) != 0x120f000)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01200200 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MSR", instr); + break; + } + } + break; + } + case 0x00000020: { + // 0x01200020 + if ((instr & 0x00400000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rm = instr & 0xf; + // BXJ{}{} ; A1 + bxj(condition, Register(rm)); + if (((instr & 0xffffff0) != 0x12fff20)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000040: { + // 0x01200040 + switch (instr & 0x00400200) { + case 0x00000000: { + // 0x01200040 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32H{} , , ; A1 + crc32h(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1200040)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01200240 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // CRC32CH{} , , ; A1 + crc32ch(al, Register(rd), Register(rn), Register(rm)); + if (((instr & 0xff00ff0) != 0x1200240)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000060: { + // 0x01200060 + if ((instr & 0x00400000) == 
0x00400000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("ERET", instr); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000080: { + // 0x01200080 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x01200080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLAWB{}{} , , , ; A1 + smlawb(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x01600080 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULBB{}{} {}, , ; A1 + smulbb(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x1600080)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x000000a0: { + // 0x012000a0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x012000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULWB{}{} {}, , ; A1 + smulwb(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x12000a0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400000: { + // 0x016000a0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULTB{}{} {}, , ; A1 + smultb(condition, + 
Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x16000a0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x000000c0: { + // 0x012000c0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x012000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // SMLAWT{}{} , , , ; A1 + smlawt(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00400000: { + // 0x016000c0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULBT{}{} {}, , ; A1 + smulbt(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x16000c0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x000000e0: { + // 0x012000e0 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x012000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULWT{}{} {}, , ; A1 + smulwt(condition, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x12000e0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00400000: { + // 0x016000e0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULTT{}{} {}, , ; A1 + smultt(condition, + Register(rd), + Register(rn), + 
Register(rm)); + if (((instr & 0xff0f0f0) != 0x16000e0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + } + break; + } + case 0x01800000: { + // 0x01800000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x01800000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01800060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // ORR{}{} {}, , , RRX ; A1 + orr(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // ORR{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + orr(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + case 0x00400000: { + // 0x01c00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01c00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + // BIC{}{} {}, , , RRX ; A1 + bic(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), RRX)); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = 
instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // BIC{}{} {}, , {, # } ; A1 NOLINT(whitespace/line_length) + bic(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + break; + } + } + break; + } + } + break; + } + case 0x01a00000: { + // 0x01a00000 + switch (instr & 0x00400000) { + case 0x00000000: { + // 0x01a00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01a00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // RRX{}{} {}, ; A1 + rrx(condition, Register(rd), Register(rm)); + if (((instr & 0xfff0ff0) != 0x1a00060)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // MOV{}{} , , RRX ; A1 + mov(condition, + Best, + Register(rd), + Operand(Register(rm), RRX)); + if (((instr & 0xfff0ff0) != 0x1a00060)) { + UnpredictableA32(instr); + } + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x2)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + if (amount == 0) amount = 32; + // ASR{}{} {}, , # ; A1 + asr(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00040)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x0)) && + ((instr & 0xf0000000) != 0xf0000000) && + ((instr & 0x00000f80) != 0x00000000)) { + Condition condition((instr >> 
28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + // LSL{}{} {}, , # ; A1 + lsl(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00000)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x1)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + if (amount == 0) amount = 32; + // LSR{}{} {}, , # ; A1 + lsr(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00020)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == + Uint32(0x3)) && + ((instr & 0xf0000000) != 0xf0000000) && + ((instr & 0x00000f80) != 0x00000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + uint32_t amount = (instr >> 7) & 0x1f; + // ROR{}{} {}, , # ; A1 + ror(condition, + Best, + Register(rd), + Register(rm), + amount); + if (((instr & 0xfff0070) != 0x1a00060)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // MOV{}{} , {, # } ; A1 + mov(condition, + Best, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + if (((instr & 0xfff0010) != 0x1a00000)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00400000: { + // 0x01e00000 + switch (instr & 0x00000fe0) { + case 0x00000060: { + // 0x01e00060 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = 
instr & 0xf; + // MVN{}{} , , RRX ; A1 + mvn(condition, + Best, + Register(rd), + Operand(Register(rm), RRX)); + if (((instr & 0xfff0ff0) != 0x1e00060)) { + UnpredictableA32(instr); + } + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xfe0) == 0x60)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + ImmediateShiftOperand shift_operand((instr >> 5) & 0x3, + (instr >> 7) & + 0x1f); + // MVN{}{} , {, # } ; A1 + mvn(condition, + Best, + Register(rd), + Operand(Register(rm), + shift_operand.GetType(), + shift_operand.GetAmount())); + if (((instr & 0xfff0010) != 0x1e00000)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + } + break; + } + } + break; + } + case 0x00000010: { + // 0x00000010 + switch (instr & 0x00400080) { + case 0x00000000: { + // 0x00000010 + switch (instr & 0x01a00000) { + case 0x00000000: { + // 0x00000010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // AND{}{} {}, , , ; A1 + and_(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00200000: { + // 0x00200010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // EOR{}{} {}, , , ; A1 + eor(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00800000: { + // 0x00800010 + if (((instr & 
0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // ADD{}{} {}, , , ; A1 + add(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00a00000: { + // 0x00a00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // ADC{}{} {}, , , ; A1 + adc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x01000000: { + // 0x01000010 + switch (instr & 0x00000060) { + case 0x00000040: { + // 0x01000050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QADD{}{} {}, , ; A1 + qadd(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1000050)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01000070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + uint32_t imm = (instr & 0xf) | ((instr >> 4) & 0xfff0); + // HLT{} {#} ; A1 + hlt(al, imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01200000: { + // 0x01200010 + switch (instr & 0x00000060) { + case 0x00000000: { + // 0x01200010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rm = instr 
& 0xf; + // BX{}{} ; A1 + bx(condition, Register(rm)); + if (((instr & 0xffffff0) != 0x12fff10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000020: { + // 0x01200030 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rm = instr & 0xf; + // BLX{}{} ; A1 + blx(condition, Register(rm)); + if (((instr & 0xffffff0) != 0x12fff30)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000040: { + // 0x01200050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QSUB{}{} {}, , ; A1 + qsub(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1200050)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01200070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + uint32_t imm = (instr & 0xf) | ((instr >> 4) & 0xfff0); + // BKPT{} {#} ; A1 + bkpt(al, imm); + break; + } + } + break; + } + case 0x01800000: { + // 0x01800010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // ORR{}{} {}, , , ; A1 + orr(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x01a00000: { + // 0x01a00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x2)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 
0xf; + unsigned rs = (instr >> 8) & 0xf; + // ASR{}{} {}, , ; A1 + asr(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00050)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x0)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rs = (instr >> 8) & 0xf; + // LSL{}{} {}, , ; A1 + lsl(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00010)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x1)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rs = (instr >> 8) & 0xf; + // LSR{}{} {}, , ; A1 + lsr(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00030)) { + UnpredictableA32(instr); + } + return; + } + if (((Uint32((instr >> 5)) & Uint32(0x3)) == Uint32(0x3)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rs = (instr >> 8) & 0xf; + // ROR{}{} {}, , ; A1 + ror(condition, + Best, + Register(rd), + Register(rm), + Register(rs)); + if (((instr & 0xfff00f0) != 0x1a00070)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // MOV{}{} , , ; A1 + mov(condition, + Best, + Register(rd), + Operand(Register(rm), shift.GetType(), Register(rs))); + if (((instr & 0xfff0090) != 0x1a00010)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00000080: { + // 0x00000090 + switch 
(instr & 0x01200060) { + case 0x00000000: { + // 0x00000090 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0x00000090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // MUL{}{} , , {} ; A1 + mul(condition, + Best, + Register(rd), + Register(rn), + Register(rm)); + if (((instr & 0xff0f0f0) != 0x90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800000: { + // 0x00800090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // UMULL{}{} , , , ; A1 + umull(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x00000020: { + // 0x000000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + // STRH{}{} , [], #{+/-} ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + if (((instr & 0xf700ff0) != 0xb0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000040: { + // 0x000000d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? 
minus : plus); + unsigned rm = instr & 0xf; + // LDRD{}{} , , [], #{+/-} ; A1 + ldrd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + if (((instr & 0xf700ff0) != 0xd0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x000000f0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + // STRD{}{} , , [], #{+/-} ; A1 + strd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + PostIndex)); + if (((instr & 0xf700ff0) != 0xf0)) { + UnpredictableA32(instr); + } + break; + } + case 0x00200000: { + // 0x00200090 + switch (instr & 0x00800000) { + case 0x00000000: { + // 0x00200090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // MLA{}{} , , , ; A1 + mla(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00800000: { + // 0x00a00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // UMLAL{}{} , , , ; A1 + umlal(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + } + break; + } + case 0x00200020: { + // 0x002000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STRHT", instr); + break; + } + case 0x01000000: { + // 0x01000090 + 
switch (instr & 0x00800300) { + case 0x00800000: { + // 0x01800090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STL{}{} , [] ; A1 + stl(condition, + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff0fff0) != 0x180fc90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800200: { + // 0x01800290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEX{}{} , , [] ; A1 + stlex(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1800e90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800300: { + // 0x01800390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREX{}{} , , [{, #}] ; A1 + strex(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), plus, 0, Offset)); + if (((instr & 0xff00ff0) != 0x1800f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01000020: { + // 0x010000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? 
minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = Offset; + // STRH{}{} , [, #{+/-}] ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x10000b0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01000040: { + // 0x010000d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = Offset; + // LDRD{}{} , , [, #{+/-}] ; A1 + ldrd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x10000d0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01000060: { + // 0x010000f0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? 
minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = Offset; + // STRD{}{} , , [, #{+/-}] ; A1 + strd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x10000f0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01200000: { + // 0x01200090 + switch (instr & 0x00800300) { + case 0x00800200: { + // 0x01a00290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEXD{}{} , , , [] ; A1 + stlexd(condition, + Register(rd), + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1a00e90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800300: { + // 0x01a00390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREXD{}{} , , , [] ; A1 + strexd(condition, + Register(rd), + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1a00f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01200020: { + // 0x012000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = PreIndex; + // STRH{}{} , [, #{+/-}]! 
; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x12000b0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01200040: { + // 0x012000d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = PreIndex; + // LDRD{}{} , , [, #{+/-}]! ; A1 + ldrd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x12000d0)) { + UnpredictableA32(instr); + } + break; + } + case 0x01200060: { + // 0x012000f0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign(((instr >> 23) & 0x1) == 0 ? minus : plus); + unsigned rm = instr & 0xf; + AddrMode addrmode = PreIndex; + // STRD{}{} , , [, #{+/-}]! 
; A1 + strd(condition, + Register(rt), + Register(rt + 1), + MemOperand(Register(rn), + sign, + Register(rm), + addrmode)); + if (((instr & 0xf700ff0) != 0x12000f0)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00400000: { + // 0x00400010 + switch (instr & 0x01a00000) { + case 0x00000000: { + // 0x00400010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // SUB{}{} {}, , , ; A1 + sub(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00200000: { + // 0x00600010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // RSB{}{} {}, , , ; A1 + rsb(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00800000: { + // 0x00c00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // SBC{}{} {}, , , ; A1 + sbc(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x00a00000: { + // 0x00e00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned 
rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // RSC{}{} {}, , , ; A1 + rsc(condition, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x01000000: { + // 0x01400010 + switch (instr & 0x00000060) { + case 0x00000040: { + // 0x01400050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QDADD{}{} {}, , ; A1 + qdadd(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1400050)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01400070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + uint32_t imm = (instr & 0xf) | ((instr >> 4) & 0xfff0); + // HVC{} {#} ; A1 + hvc(al, imm); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01200000: { + // 0x01600010 + switch (instr & 0x00000060) { + case 0x00000000: { + // 0x01600010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + // CLZ{}{} , ; A1 + clz(condition, Register(rd), Register(rm)); + if (((instr & 0xfff0ff0) != 0x16f0f10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000040: { + // 0x01600050 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // QDSUB{}{} {}, , ; A1 + qdsub(condition, + Register(rd), + Register(rm), + Register(rn)); + if (((instr & 0xff00ff0) != 0x1600050)) { + 
UnpredictableA32(instr); + } + break; + } + case 0x00000060: { + // 0x01600070 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("SMC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01800000: { + // 0x01c00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // BIC{}{} {}, , , ; A1 + bic(condition, + Best, + Register(rd), + Register(rn), + Operand(Register(rm), shift.GetType(), Register(rs))); + break; + } + case 0x01a00000: { + // 0x01e00010 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rm = instr & 0xf; + Shift shift((instr >> 5) & 0x3); + unsigned rs = (instr >> 8) & 0xf; + // MVN{}{} , , ; A1 + mvn(condition, + Best, + Register(rd), + Operand(Register(rm), shift.GetType(), Register(rs))); + if (((instr & 0xfff0090) != 0x1e00010)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00400080: { + // 0x00400090 + switch (instr & 0x00000060) { + case 0x00000000: { + // 0x00400090 + switch (instr & 0x01a00000) { + case 0x00000000: { + // 0x00400090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // UMAAL{}{} , , , ; A1 + umaal(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + case 0x00200000: { + // 0x00600090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + 
Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + unsigned ra = (instr >> 12) & 0xf; + // MLS{}{} , , , ; A1 + mls(condition, + Register(rd), + Register(rn), + Register(rm), + Register(ra)); + break; + } + case 0x00800000: { + // 0x00c00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMULL{}{} , , , ; A1 + smull(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + case 0x00a00000: { + // 0x00e00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rdlo = (instr >> 12) & 0xf; + unsigned rdhi = (instr >> 16) & 0xf; + unsigned rn = instr & 0xf; + unsigned rm = (instr >> 8) & 0xf; + // SMLAL{}{} , , , ; A1 + smlal(condition, + Register(rdlo), + Register(rdhi), + Register(rn), + Register(rm)); + break; + } + case 0x01800000: { + // 0x01c00090 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0x01c00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLB{}{} , [] ; A1 + stlb(condition, + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff0fff0) != 0x1c0fc90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01c00290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEXB{}{} , , [] ; A1 + stlexb(condition, + Register(rd), + 
Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1c00e90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000300: { + // 0x01c00390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREXB{}{} , , [] ; A1 + strexb(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1c00f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01a00000: { + // 0x01e00090 + switch (instr & 0x00000300) { + case 0x00000000: { + // 0x01e00090 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLH{}{} , [] ; A1 + stlh(condition, + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff0fff0) != 0x1e0fc90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000200: { + // 0x01e00290 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STLEXH{}{} , , [] ; A1 + stlexh(condition, + Register(rd), + Register(rt), + MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1e00e90)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000300: { + // 0x01e00390 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = (instr >> 12) & 0xf; + unsigned rt = instr & 0xf; + unsigned rn = (instr >> 16) & 0xf; + // STREXH{}{} , , [] ; A1 + strexh(condition, + Register(rd), + Register(rt), + 
MemOperand(Register(rn), Offset)); + if (((instr & 0xff00ff0) != 0x1e00f90)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000020: { + // 0x004000b0 + switch (instr & 0x01200000) { + case 0x00000000: { + // 0x004000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign((((instr >> 23) & 0x1) == 0) ? minus : plus); + int32_t offset = (instr & 0xf) | ((instr >> 4) & 0xf0); + // STRH{}{} , [], #{+/-} ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), sign, offset, PostIndex)); + break; + } + case 0x00200000: { + // 0x006000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STRHT", instr); + break; + } + case 0x01000000: { + // 0x014000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign((((instr >> 23) & 0x1) == 0) ? minus : plus); + int32_t offset = (instr & 0xf) | ((instr >> 4) & 0xf0); + // STRH{}{} , [{, #{+/-}}] ; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + case 0x01200000: { + // 0x016000b0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = (instr >> 16) & 0xf; + Sign sign((((instr >> 23) & 0x1) == 0) ? minus : plus); + int32_t offset = (instr & 0xf) | ((instr >> 4) & 0xf0); + // STRH{}{} , [{, #{+/-}}]! 
; A1 + strh(condition, + Best, + Register(rt), + MemOperand(Register(rn), sign, offset, PreIndex)); + break; + } + } + break; + } + case 0x00000040: { + // 0x004000d0 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0x004f00d0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = (instr & 0xf) | ((instr >> 4) & 0xf0); + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) && (U == 0); + Location location(imm, kA32PcDelta); + // LDRD{}{} , ,
, [{, #{+/-}}] ; A1 + vstr(condition, + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + case 0x00200000: { + // 0x0d200a00 + if ((instr & 0x00800000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 16)) & Uint32(0xf)) == Uint32(0xd)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VPUSH{}{}{.} ; A2 + vpush(condition, + kDataTypeValueNone, + SRegisterList(SRegister(first), len)); + if ((len == 0) || ((first + len) > kNumberOfSRegisters)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VSTMDB{}{}{.} !, ; A2 + vstmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || ((first + len) > kNumberOfSRegisters)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00200100: { + // 0x0d200b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0x0d200b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + if (((Uint32((instr >> 16)) & Uint32(0xf)) == + Uint32(0xd)) && + ((instr & 0xf0000000) != 0xf0000000)) { + Condition condition((instr >> 28) & 0xf); + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VPUSH{}{}{.} ; A1 + vpush(condition, + kDataTypeValueNone, + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableA32(instr); + } + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned 
first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VSTMDB{}{}{.} !, ; A1 + vstmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000001: { + // 0x0d200b01 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FSTMDBX{}{} !, ; A1 + fstmdbx(condition, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x01000e00: { + // 0x0d000e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0x0d005e00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STC", instr); + break; + } + case 0x00205000: { + // 0x0d205e00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("STC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01100a00: { + // 0x0d100a00 + switch (instr & 0x00200100) { + case 0x00000000: { + // 0x0d100a00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0x0d1f0a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t U = (instr >> 23) & 0x1; + int32_t imm = instr & 0xff; + imm <<= 2; + if (U == 0) imm = -imm; + bool minus_zero = (imm == 0) 
&& (U == 0); + Location location(imm, kA32PcDelta); + // VLDR{}{}{.32} ,
,
, [{, #{+/-}}] ; A1 + vldr(condition, + Untyped64, + DRegister(rd), + MemOperand(Register(rn), sign, offset, Offset)); + break; + } + } + break; + } + case 0x00200000: { + // 0x0d300a00 + if ((instr & 0x00800000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractSRegister(instr, 22, 12); + unsigned len = instr & 0xff; + // VLDMDB{}{}{.} !, ; A2 + vldmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + SRegisterList(SRegister(first), len)); + if ((len == 0) || ((first + len) > kNumberOfSRegisters)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00200100: { + // 0x0d300b00 + switch (instr & 0x00800001) { + case 0x00000000: { + // 0x0d300b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // VLDMDB{}{}{.} !, ; A1 + vldmdb(condition, + kDataTypeValueNone, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || + (end > kMaxNumberOfDRegisters)) { + UnpredictableA32(instr); + } + break; + } + case 0x00000001: { + // 0x0d300b01 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = (instr >> 16) & 0xf; + unsigned first = ExtractDRegister(instr, 22, 12); + unsigned imm8 = (instr & 0xff); + unsigned len = imm8 / 2; + unsigned end = first + len; + // FLDMDBX{}{} !, ; A1 + fldmdbx(condition, + Register(rn), + WriteBack(WRITE_BACK), + DRegisterList(DRegister(first), len)); + if ((len == 0) || (len > 16) || (end > 16)) { + 
UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x01100e00: { + // 0x0d100e00 + switch (instr & 0x0060f100) { + case 0x00005000: { + // 0x0d105e00 + switch (instr & 0x000f0000) { + case 0x000f0000: { + // 0x0d1f5e00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("LDC", instr); + break; + } + default: { + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xf0000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("LDC", instr); + break; + } + } + break; + } + case 0x00205000: { + // 0x0d305e00 + if (((instr & 0xf0000000) == 0xf0000000) || + ((instr & 0xf0000) == 0xf0000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("LDC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x0e000000: { + // 0x0e000000 + switch (instr & 0x01000000) { + case 0x00000000: { + // 0x0e000000 + switch (instr & 0x00100e10) { + case 0x00000a00: { + // 0x0e000a00 + switch (instr & 0x00a00140) { + case 0x00000000: { + // 0x0e000a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLA{}{}.F32 , , ; A2 + vmla(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000040: { + // 0x0e000a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMLS{}{}.F32 , , ; A2 + vmls(condition, + F32, + SRegister(rd), + SRegister(rn), + 
SRegister(rm)); + break; + } + case 0x00000100: { + // 0x0e000b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLA{}{}.F64
, , ; A2 + vmla(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0x0e000b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMLS{}{}.F64
, , ; A2 + vmls(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0x0e200a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMUL{}{}.F32 {}, , ; A2 + vmul(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200040: { + // 0x0e200a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMUL{}{}.F32 {}, , ; A1 + vnmul(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200100: { + // 0x0e200b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMUL{}{}.F64 {
}, , ; A2 + vmul(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200140: { + // 0x0e200b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMUL{}{}.F64 {
}, , ; A1 + vnmul(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800000: { + // 0x0e800a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VDIV{}{}.F32 {}, , ; A1 + vdiv(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800100: { + // 0x0e800b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VDIV{}{}.F64 {
}, , ; A1 + vdiv(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00000: { + // 0x0ea00a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMA{}{}.F32 , , ; A2 + vfma(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00040: { + // 0x0ea00a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFMS{}{}.F32 , , ; A2 + vfms(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00a00100: { + // 0x0ea00b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMA{}{}.F64
, , ; A2 + vfma(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00140: { + // 0x0ea00b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFMS{}{}.F64
, , ; A2 + vfms(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000a10: { + // 0x0e000a10 + switch (instr & 0x00800100) { + case 0x00000000: { + // 0x0e000a10 + if ((instr & 0x00600000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{} , ; A1 + vmov(condition, SRegister(rn), Register(rt)); + if (((instr & 0xff00f7f) != 0xe000a10)) { + UnpredictableA32(instr); + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00000100: { + // 0x0e000b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned lane; + DataType dt = + Dt_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & 0xc), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VMOV{}{}{.} , ; A1 + vmov(condition, dt, DRegisterLane(rd, lane), Register(rt)); + if (((instr & 0xf900f1f) != 0xe000b10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00800000: { + // 0x0e800a10 + if ((instr & 0x00600000) == 0x00600000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned spec_reg = (instr >> 16) & 0xf; + unsigned rt = (instr >> 12) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x8: { + // VMSR{}{} , ; A1 + vmsr(condition, + SpecialFPRegister(spec_reg), + Register(rt)); + if (((instr & 0xff00fff) != 0xee00a10)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + } else { + UnallocatedA32(instr); + } + break; + } + case 0x00800100: { + // 
0x0e800b10 + switch (instr & 0x00200040) { + case 0x00000000: { + // 0x0e800b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_B_E_1_Decode(((instr >> 5) & 0x1) | + ((instr >> 21) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VDUP{}{}.
, ; A1 + vdup(condition, dt, DRegister(rd), Register(rt)); + if (((instr & 0xfb00f5f) != 0xe800b10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00200000: { + // 0x0ea00b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_B_E_1_Decode(((instr >> 5) & 0x1) | + ((instr >> 21) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + if (((instr >> 16) & 1) != 0) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractQRegister(instr, 7, 16); + unsigned rt = (instr >> 12) & 0xf; + // VDUP{}{}.
, ; A1 + vdup(condition, dt, QRegister(rd), Register(rt)); + if (((instr & 0xfb00f5f) != 0xea00b10)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + } + break; + } + case 0x00000e10: { + // 0x0e000e10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MCR", instr); + break; + } + case 0x00100a00: { + // 0x0e100a00 + switch (instr & 0x00a00140) { + case 0x00000000: { + // 0x0e100a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLS{}{}.F32 , , ; A1 + vnmls(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000040: { + // 0x0e100a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNMLA{}{}.F32 , , ; A1 + vnmla(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00000100: { + // 0x0e100b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLS{}{}.F64
, , ; A1 + vnmls(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00000140: { + // 0x0e100b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNMLA{}{}.F64
, , ; A1 + vnmla(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200000: { + // 0x0e300a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VADD{}{}.F32 {}, , ; A2 + vadd(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200040: { + // 0x0e300a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSUB{}{}.F32 {}, , ; A2 + vsub(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00200100: { + // 0x0e300b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VADD{}{}.F64 {
}, , ; A2 + vadd(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00200140: { + // 0x0e300b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSUB{}{}.F64 {
}, , ; A2 + vsub(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800000: { + // 0x0e900a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMS{}{}.F32 , , ; A1 + vfnms(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800040: { + // 0x0e900a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rn = ExtractSRegister(instr, 7, 16); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VFNMA{}{}.F32 , , ; A1 + vfnma(condition, + F32, + SRegister(rd), + SRegister(rn), + SRegister(rm)); + break; + } + case 0x00800100: { + // 0x0e900b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFNMS{}{}.F64
, , ; A1 + vfnms(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00800140: { + // 0x0e900b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rn = ExtractDRegister(instr, 7, 16); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VFNMA{}{}.F64
, , ; A1 + vfnma(condition, + F64, + DRegister(rd), + DRegister(rn), + DRegister(rm)); + break; + } + case 0x00a00000: { + // 0x0eb00a00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F32 , # ; A2 + vmov(condition, F32, SRegister(rd), imm); + if (((instr & 0xfb00ff0) != 0xeb00a00)) { + UnpredictableA32(instr); + } + break; + } + case 0x00a00040: { + // 0x0eb00a40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0x0eb00a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb00a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VMOV{}{}.F32 , ; A2 + vmov(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb00ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VABS{}{}.F32 , ; A2 + vabs(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb10a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VNEG{}{}.F32 , ; A2 + vneg(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb10ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = 
ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VSQRT{}{}.F32 , ; A1 + vsqrt(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0x0eb20a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb20a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F32.F16 , ; A1 + vcvtb(condition, + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb20ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F32.F16 , ; A1 + vcvtt(condition, + F32, + F16, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb30a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F16.F32 , ; A1 + vcvtb(condition, + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb30ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F16.F32 , ; A1 + vcvtt(condition, + F16, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0x0eb40a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb40a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 
0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMP{}{}.F32 , ; A1 + vcmp(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb40ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCMPE{}{}.F32 , ; A1 + vcmpe(condition, F32, SRegister(rd), SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb50a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMP{}{}.F32 , #0.0 ; A2 + vcmp(condition, F32, SRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50a40)) { + UnpredictableA32(instr); + } + break; + } + case 0x00010080: { + // 0x0eb50ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + // VCMPE{}{}.F32 , #0.0 ; A2 + vcmpe(condition, F32, SRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50ac0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0x0eb60a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb60a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTR{}{}.F32 , ; A1 + vrintr(condition, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb60ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = 
ExtractSRegister(instr, 5, 0); + // VRINTZ{}{}.F32 , ; A1 + vrintz(condition, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb70a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VRINTX{}{}.F32 , ; A1 + vrintx(condition, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb70ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F64.F32
, ; A1 + vcvt(condition, + F64, + F32, + DRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x00080000: { + // 0x0eb80a40 + if ((instr & 0x00010000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F32.
, ; A1 + vcvt(condition, + F32, + dt, + SRegister(rd), + SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0x0eba0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.F32.
, , # ; A1 + vcvt(condition, + F32, + dt, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0x0ebc0a40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0ebc0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.U32.F32 , ; A1 + vcvtr(condition, + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0ebc0ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.U32.F32 , ; A1 + vcvt(condition, + U32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0ebd0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTR{}{}.S32.F32 , ; A1 + vcvtr(condition, + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + case 0x00010080: { + // 0x0ebd0ac0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.S32.F32 , ; A1 + vcvt(condition, + S32, + F32, + SRegister(rd), + SRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0x0ebe0a40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + 
unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
.F32 , , # ; A1 + vcvt(condition, + dt, + F32, + SRegister(rd), + SRegister(rd), + fbits); + break; + } + } + break; + } + case 0x00a00100: { + // 0x0eb00b00 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + uint32_t encoded_imm = + (instr & 0xf) | ((instr >> 12) & 0xf0); + NeonImmediate imm = + ImmediateVFP::Decode(encoded_imm); + // VMOV{}{}.F64
, # ; A2 + vmov(condition, F64, DRegister(rd), imm); + if (((instr & 0xfb00ff0) != 0xeb00b00)) { + UnpredictableA32(instr); + } + break; + } + case 0x00a00140: { + // 0x0eb00b40 + switch (instr & 0x000e0000) { + case 0x00000000: { + // 0x0eb00b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb00b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VMOV{}{}.F64
, ; A2 + vmov(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb00bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VABS{}{}.F64
, ; A2 + vabs(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb10b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VNEG{}{}.F64
, ; A2 + vneg(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb10bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VSQRT{}{}.F64
, ; A1 + vsqrt(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + } + break; + } + case 0x00020000: { + // 0x0eb20b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb20b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTB{}{}.F64.F16
, ; A1 + vcvtb(condition, + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb20bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVTT{}{}.F64.F16
, ; A1 + vcvtt(condition, + F64, + F16, + DRegister(rd), + SRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb30b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTB{}{}.F16.F64 , ; A1 + vcvtb(condition, + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb30bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTT{}{}.F16.F64 , ; A1 + vcvtt(condition, + F16, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00040000: { + // 0x0eb40b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb40b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMP{}{}.F64
, ; A1 + vcmp(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb40bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCMPE{}{}.F64
, ; A1 + vcmpe(condition, F64, DRegister(rd), DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb50b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + // VCMP{}{}.F64
, #0.0 ; A2 + vcmp(condition, F64, DRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50b40)) { + UnpredictableA32(instr); + } + break; + } + case 0x00010080: { + // 0x0eb50bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + // VCMPE{}{}.F64
, #0.0 ; A2 + vcmpe(condition, F64, DRegister(rd), 0.0); + if (((instr & 0xfbf0fff) != 0xeb50bc0)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00060000: { + // 0x0eb60b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0eb60b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTR{}{}.F64
, ; A1 + vrintr(condition, + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0eb60bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTZ{}{}.F64
, ; A1 + vrintz(condition, + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0eb70b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VRINTX{}{}.F64
, ; A1 + vrintx(condition, + F64, + DRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0x0eb70bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.F32.F64 , ; A1 + vcvt(condition, + F32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x00080000: { + // 0x0eb80b40 + if ((instr & 0x00010000) == 0x00000000) { + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_op_2_Decode((instr >> 7) & 0x1); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned rm = ExtractSRegister(instr, 5, 0); + // VCVT{}{}.F64.
, ; A1 + vcvt(condition, + F64, + dt, + DRegister(rd), + SRegister(rm)); + } else { + UnallocatedA32(instr); + } + break; + } + case 0x000a0000: { + // 0x0eba0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.F64.
, , # ; A1 + vcvt(condition, + F64, + dt, + DRegister(rd), + DRegister(rd), + fbits); + break; + } + case 0x000c0000: { + // 0x0ebc0b40 + switch (instr & 0x00010080) { + case 0x00000000: { + // 0x0ebc0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.U32.F64 , ; A1 + vcvtr(condition, + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00000080: { + // 0x0ebc0bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.U32.F64 , ; A1 + vcvt(condition, + U32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010000: { + // 0x0ebd0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVTR{}{}.S32.F64 , ; A1 + vcvtr(condition, + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + case 0x00010080: { + // 0x0ebd0bc0 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rd = ExtractSRegister(instr, 22, 12); + unsigned rm = ExtractDRegister(instr, 5, 0); + // VCVT{}{}.S32.F64 , ; A1 + vcvt(condition, + S32, + F64, + SRegister(rd), + DRegister(rm)); + break; + } + } + break; + } + case 0x000e0000: { + // 0x0ebe0b40 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + DataType dt = Dt_U_sx_1_Decode(((instr >> 7) & 0x1) | + ((instr >> 15) & 0x2)); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + 
unsigned rd = ExtractDRegister(instr, 22, 12); + unsigned offset = 32; + if (dt.Is(S16) || dt.Is(U16)) { + offset = 16; + } + uint32_t fbits = offset - (((instr >> 5) & 0x1) | + ((instr << 1) & 0x1e)); + // VCVT{}{}.
.F64 , , # ; A1 + vcvt(condition, + dt, + F64, + DRegister(rd), + DRegister(rd), + fbits); + break; + } + } + break; + } + } + break; + } + case 0x00100a10: { + // 0x0e100a10 + switch (instr & 0x00000100) { + case 0x00000000: { + // 0x0e100a10 + switch (instr & 0x00e00000) { + case 0x00000000: { + // 0x0e100a10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractSRegister(instr, 7, 16); + // VMOV{}{} , ; A1 + vmov(condition, Register(rt), SRegister(rn)); + if (((instr & 0xff00f7f) != 0xe100a10)) { + UnpredictableA32(instr); + } + break; + } + case 0x00e00000: { + // 0x0ef00a10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned rt = (instr >> 12) & 0xf; + unsigned spec_reg = (instr >> 16) & 0xf; + switch (spec_reg) { + case 0x0: + case 0x1: + case 0x5: + case 0x6: + case 0x7: + case 0x8: { + // VMRS{}{} , ; A1 + vmrs(condition, + RegisterOrAPSR_nzcv(rt), + SpecialFPRegister(spec_reg)); + if (((instr & 0xff00fff) != 0xef00a10)) { + UnpredictableA32(instr); + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x00000100: { + // 0x0e100b10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + unsigned lane; + DataType dt = + Dt_U_opc1_opc2_1_Decode(((instr >> 5) & 0x3) | + ((instr >> 19) & 0xc) | + ((instr >> 19) & 0x10), + &lane); + if (dt.Is(kDataTypeValueInvalid)) { + UnallocatedA32(instr); + return; + } + unsigned rt = (instr >> 12) & 0xf; + unsigned rn = ExtractDRegister(instr, 7, 16); + // VMOV{}{}{.
} , ; A1 + vmov(condition, dt, Register(rt), DRegisterLane(rn, lane)); + if (((instr & 0xf100f1f) != 0xe100b10)) { + UnpredictableA32(instr); + } + break; + } + } + break; + } + case 0x00100e10: { + // 0x0e100e10 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + UnimplementedA32("MRC", instr); + break; + } + default: + UnallocatedA32(instr); + break; + } + break; + } + case 0x01000000: { + // 0x0f000000 + if (((instr & 0xf0000000) == 0xf0000000)) { + UnallocatedA32(instr); + return; + } + Condition condition((instr >> 28) & 0xf); + uint32_t imm = instr & 0xffffff; + // SVC{}{} {#} ; A1 + svc(condition, imm); + break; + } + } + break; + } + } + } +} // NOLINT(readability/fn_size) +// End of generated code. + +const uint16_t* PrintDisassembler::DecodeT32At( + const uint16_t* instruction_address, const uint16_t* buffer_end) { + uint32_t instruction = *instruction_address++ << 16; + + if (instruction >= kLowestT32_32Opcode) { + if (instruction_address >= buffer_end) { + os() << "?\n"; + return instruction_address; + } + instruction |= *instruction_address++; + } + + DecodeT32(instruction); + return instruction_address; +} + +void PrintDisassembler::DecodeT32(uint32_t instruction) { + PrintCodeAddress(GetCodeAddress()); + if (T32Size(instruction) == 2) { + PrintOpcode16(instruction >> 16); + Disassembler::DecodeT32(instruction); + } else { + PrintOpcode32(instruction); + Disassembler::DecodeT32(instruction); + } + os() << "\n"; +} + + +void PrintDisassembler::DecodeA32(uint32_t instruction) { + PrintCodeAddress(GetCodeAddress()); + PrintOpcode32(instruction); + Disassembler::DecodeA32(instruction); + os() << "\n"; +} + + +void PrintDisassembler::DisassembleA32Buffer(const uint32_t* buffer, + size_t size_in_bytes) { + VIXL_ASSERT(IsAligned(buffer)); + VIXL_ASSERT(IsMultiple(size_in_bytes)); + const uint32_t* const end_buffer = + buffer + (size_in_bytes / sizeof(uint32_t)); + while (buffer < end_buffer) { + DecodeA32(*buffer++); + } +} 
+ + +void PrintDisassembler::DisassembleT32Buffer(const uint16_t* buffer, + size_t size_in_bytes) { + VIXL_ASSERT(IsAligned(buffer)); + VIXL_ASSERT(IsMultiple(size_in_bytes)); + const uint16_t* const end_buffer = + buffer + (size_in_bytes / sizeof(uint16_t)); + while (buffer < end_buffer) { + buffer = DecodeT32At(buffer, end_buffer); + } + VIXL_ASSERT(buffer == end_buffer); +} + +} // namespace aarch32 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.h new file mode 100644 index 00000000..679f47ba --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/disasm-aarch32.h @@ -0,0 +1,2723 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_DISASM_AARCH32_H_ +#define VIXL_DISASM_AARCH32_H_ + +extern "C" { +#include +} + +#include + +#include "aarch32/constants-aarch32.h" +#include "aarch32/operands-aarch32.h" + +namespace vixl { +namespace aarch32 { + +class ITBlock { + Condition first_condition_; + Condition condition_; + uint16_t it_mask_; + + public: + ITBlock() : first_condition_(al), condition_(al), it_mask_(0) {} + void Advance() { + condition_ = Condition((condition_.GetCondition() & 0xe) | (it_mask_ >> 3)); + it_mask_ = (it_mask_ << 1) & 0xf; + } + bool InITBlock() const { return it_mask_ != 0; } + bool OutsideITBlock() const { return !InITBlock(); } + bool LastInITBlock() const { return it_mask_ == 0x8; } + bool OutsideITBlockOrLast() const { + return OutsideITBlock() || LastInITBlock(); + } + void Set(Condition first_condition, uint16_t mask) { + condition_ = first_condition_ = first_condition; + it_mask_ = mask; + } + Condition GetFirstCondition() const { return first_condition_; } + Condition GetCurrentCondition() const { return condition_; } +}; + +class Disassembler { + public: + enum LocationType { + kAnyLocation, + kCodeLocation, + kDataLocation, + kCoprocLocation, + kLoadByteLocation, + kLoadHalfWordLocation, + kLoadWordLocation, + kLoadDoubleWordLocation, + kLoadSignedByteLocation, + kLoadSignedHalfWordLocation, + kLoadSinglePrecisionLocation, + kLoadDoublePrecisionLocation, + kStoreByteLocation, + kStoreHalfWordLocation, + 
kStoreWordLocation, + kStoreDoubleWordLocation, + kStoreSinglePrecisionLocation, + kStoreDoublePrecisionLocation, + kVld1Location, + kVld2Location, + kVld3Location, + kVld4Location, + kVst1Location, + kVst2Location, + kVst3Location, + kVst4Location + }; + + class ConditionPrinter { + const ITBlock& it_block_; + Condition cond_; + + public: + ConditionPrinter(const ITBlock& it_block, Condition cond) + : it_block_(it_block), cond_(cond) {} + const ITBlock& GetITBlock() const { return it_block_; } + Condition GetCond() const { return cond_; } + friend std::ostream& operator<<(std::ostream& os, ConditionPrinter cond) { + if (cond.it_block_.InITBlock() && cond.cond_.Is(al) && + !cond.cond_.IsNone()) { + return os << "al"; + } + return os << cond.cond_; + } + }; + + class ImmediatePrinter { + uint32_t imm_; + + public: + explicit ImmediatePrinter(uint32_t imm) : imm_(imm) {} + uint32_t GetImm() const { return imm_; } + friend std::ostream& operator<<(std::ostream& os, ImmediatePrinter imm) { + return os << "#" << imm.GetImm(); + } + }; + + class SignedImmediatePrinter { + int32_t imm_; + + public: + explicit SignedImmediatePrinter(int32_t imm) : imm_(imm) {} + int32_t GetImm() const { return imm_; } + friend std::ostream& operator<<(std::ostream& os, + SignedImmediatePrinter imm) { + return os << "#" << imm.GetImm(); + } + }; + + class RawImmediatePrinter { + uint32_t imm_; + + public: + explicit RawImmediatePrinter(uint32_t imm) : imm_(imm) {} + uint32_t GetImm() const { return imm_; } + friend std::ostream& operator<<(std::ostream& os, RawImmediatePrinter imm) { + return os << imm.GetImm(); + } + }; + + class DtPrinter { + DataType dt_; + DataType default_dt_; + + public: + DtPrinter(DataType dt, DataType default_dt) + : dt_(dt), default_dt_(default_dt) {} + DataType GetDt() const { return dt_; } + DataType GetDefaultDt() const { return default_dt_; } + friend std::ostream& operator<<(std::ostream& os, DtPrinter dt) { + if (dt.dt_.Is(dt.default_dt_)) return os; + 
return os << dt.dt_; + } + }; + + class IndexedRegisterPrinter { + DRegister reg_; + uint32_t index_; + + public: + IndexedRegisterPrinter(DRegister reg, uint32_t index) + : reg_(reg), index_(index) {} + DRegister GetReg() const { return reg_; } + uint32_t GetIndex() const { return index_; } + friend std::ostream& operator<<(std::ostream& os, + IndexedRegisterPrinter reg) { + return os << reg.GetReg() << "[" << reg.GetIndex() << "]"; + } + }; + + // TODO: Merge this class with PrintLabel below. This Location class + // represents a PC-relative offset, not an address. + class Location { + public: + typedef int32_t Offset; + + Location(Offset immediate, Offset pc_offset) + : immediate_(immediate), pc_offset_(pc_offset) {} + Offset GetImmediate() const { return immediate_; } + Offset GetPCOffset() const { return pc_offset_; } + + private: + Offset immediate_; + Offset pc_offset_; + }; + + class PrintLabel { + LocationType location_type_; + Location::Offset immediate_; + Location::Offset location_; + + public: + PrintLabel(LocationType location_type, + Location* offset, + Location::Offset position) + : location_type_(location_type), + immediate_(offset->GetImmediate()), + location_(static_cast( + static_cast(offset->GetPCOffset()) + + offset->GetImmediate() + position)) {} + + LocationType GetLocationType() const { return location_type_; } + Location::Offset GetLocation() const { return location_; } + Location::Offset GetImmediate() const { return immediate_; } + + friend inline std::ostream& operator<<(std::ostream& os, + const PrintLabel& label) { + os << "0x" << std::hex << std::setw(8) << std::setfill('0') + << label.GetLocation() << std::dec; + return os; + } + }; + + + class PrintMemOperand { + LocationType location_type_; + const MemOperand& operand_; + + public: + PrintMemOperand(LocationType location_type, const MemOperand& operand) + : location_type_(location_type), operand_(operand) {} + LocationType GetLocationType() const { return location_type_; } + const 
MemOperand& GetOperand() const { return operand_; } + }; + + class PrintAlignedMemOperand { + LocationType location_type_; + const AlignedMemOperand& operand_; + + public: + PrintAlignedMemOperand(LocationType location_type, + const AlignedMemOperand& operand) + : location_type_(location_type), operand_(operand) {} + LocationType GetLocationType() const { return location_type_; } + const AlignedMemOperand& GetOperand() const { return operand_; } + }; + + class DisassemblerStream { + std::ostream& os_; + InstructionType current_instruction_type_; + InstructionAttribute current_instruction_attributes_; + + public: + explicit DisassemblerStream(std::ostream& os) // NOLINT(runtime/references) + : os_(os), + current_instruction_type_(kUndefInstructionType), + current_instruction_attributes_(kNoAttribute) {} + virtual ~DisassemblerStream() {} + std::ostream& os() const { return os_; } + void SetCurrentInstruction( + InstructionType current_instruction_type, + InstructionAttribute current_instruction_attributes) { + current_instruction_type_ = current_instruction_type; + current_instruction_attributes_ = current_instruction_attributes; + } + InstructionType GetCurrentInstructionType() const { + return current_instruction_type_; + } + InstructionAttribute GetCurrentInstructionAttributes() const { + return current_instruction_attributes_; + } + bool Has(InstructionAttribute attributes) const { + return (current_instruction_attributes_ & attributes) == attributes; + } + template + DisassemblerStream& operator<<(T value) { + os_ << value; + return *this; + } + virtual DisassemblerStream& operator<<(const char* string) { + os_ << string; + return *this; + } + virtual DisassemblerStream& operator<<(const ConditionPrinter& cond) { + os_ << cond; + return *this; + } + virtual DisassemblerStream& operator<<(Condition cond) { + os_ << cond; + return *this; + } + virtual DisassemblerStream& operator<<(const EncodingSize& size) { + os_ << size; + return *this; + } + virtual 
DisassemblerStream& operator<<(const ImmediatePrinter& imm) {
    os_ << imm;
    return *this;
  }
  // The operators below simply forward the operand to the wrapped
  // std::ostream, which knows how to print each of these types.
  virtual DisassemblerStream& operator<<(const SignedImmediatePrinter& imm) {
    os_ << imm;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const RawImmediatePrinter& imm) {
    os_ << imm;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const DtPrinter& dt) {
    os_ << dt;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const DataType& type) {
    os_ << type;
    return *this;
  }
  virtual DisassemblerStream& operator<<(Shift shift) {
    os_ << shift;
    return *this;
  }
  virtual DisassemblerStream& operator<<(Sign sign) {
    os_ << sign;
    return *this;
  }
  virtual DisassemblerStream& operator<<(Alignment alignment) {
    os_ << alignment;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const PrintLabel& label) {
    os_ << label;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const WriteBack& write_back) {
    os_ << write_back;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const NeonImmediate& immediate) {
    os_ << immediate;
    return *this;
  }
  virtual DisassemblerStream& operator<<(Register reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(SRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(DRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(QRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const RegisterOrAPSR_nzcv reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(SpecialRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(MaskedSpecialRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(SpecialFPRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(BankedRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const RegisterList& list) {
    os_ << list;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const SRegisterList& list) {
    os_ << list;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const DRegisterList& list) {
    os_ << list;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const NeonRegisterList& list) {
    os_ << list;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const DRegisterLane& reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(const IndexedRegisterPrinter& reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(Coprocessor coproc) {
    os_ << coproc;
    return *this;
  }
  virtual DisassemblerStream& operator<<(CRegister reg) {
    os_ << reg;
    return *this;
  }
  virtual DisassemblerStream& operator<<(Endianness endian_specifier) {
    os_ << endian_specifier;
    return *this;
  }
  virtual DisassemblerStream& operator<<(MemoryBarrier option) {
    os_ << option;
    return *this;
  }
  virtual DisassemblerStream& operator<<(InterruptFlags iflags) {
    os_ << iflags;
    return *this;
  }
  // Prints a core-register data-processing operand, which is one of: a
  // plain immediate, an immediate-shifted register, or a register-shifted
  // register.
  virtual DisassemblerStream& operator<<(const Operand& operand) {
    if (operand.IsImmediate()) {
      // NOTE(review): kBitwise appears to select hexadecimal printing of the
      // immediate (presumably for bitwise instructions) -- confirm against
      // wherever this flag is set.
      if (Has(kBitwise)) {
        return *this << "#0x" << std::hex << operand.GetImmediate()
                     << std::dec;
      }
      return *this << "#" << operand.GetImmediate();
    }
    if (operand.IsImmediateShiftedRegister()) {
      // "LSL #0" and "ROR #0" leave the value unchanged: print the bare
      // register instead of a redundant shift.
      if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) &&
          (operand.GetShiftAmount() == 0)) {
        return *this << operand.GetBaseRegister();
      }
      // RRX takes no shift amount.
      if (operand.GetShift().IsRRX()) {
        return *this << operand.GetBaseRegister() << ", rrx";
      }
      return *this << operand.GetBaseRegister() << ", " << operand.GetShift()
                   << " #" << operand.GetShiftAmount();
    }
    if (operand.IsRegisterShiftedRegister()) {
      return *this << operand.GetBaseRegister() << ", " << operand.GetShift()
                   << " " << operand.GetShiftRegister();
    }
    VIXL_UNREACHABLE();
    return *this;
  }
  // S/D/Q operands are either a NEON immediate or a register of the
  // corresponding width.
  virtual DisassemblerStream& operator<<(const SOperand& operand) {
    if (operand.IsImmediate()) {
      return *this << operand.GetNeonImmediate();
    }
    return *this << operand.GetRegister();
  }
  virtual DisassemblerStream& operator<<(const DOperand& operand) {
    if (operand.IsImmediate()) {
      return *this << operand.GetNeonImmediate();
    }
    return *this << operand.GetRegister();
  }
  virtual DisassemblerStream& operator<<(const QOperand& operand) {
    if (operand.IsImmediate()) {
      return *this << operand.GetNeonImmediate();
    }
    return *this << operand.GetRegister();
  }
  // Prints a memory operand, handling offset, pre-index and post-index
  // addressing modes (e.g. "[rn, #imm]", "[rn, #imm]!", "[rn], #imm",
  // "[rn, +/-rm, shift #amount]").
  virtual DisassemblerStream& operator<<(const MemOperand& operand) {
    *this << "[" << operand.GetBaseRegister();
    if (operand.GetAddrMode() == PostIndex) {
      // Post-index closes the bracket right after the base register.
      *this << "]";
      if (operand.IsRegisterOnly()) return *this << "!";
    }
    if (operand.IsImmediate()) {
      if ((operand.GetOffsetImmediate() != 0) ||
          operand.GetSign().IsMinus() ||
          ((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) {
        if (operand.GetOffsetImmediate() == 0) {
          // A zero immediate carries no sign of its own, so the sign is
          // printed explicitly here (allowing "#-0"); a non-zero immediate
          // already embeds its sign.
          *this << ", #" << operand.GetSign() << operand.GetOffsetImmediate();
        } else {
          *this << ", #" << operand.GetOffsetImmediate();
        }
      }
    } else if (operand.IsPlainRegister()) {
      *this << ", " << operand.GetSign() << operand.GetOffsetRegister();
    } else if (operand.IsShiftedRegister()) {
      *this << ", " << operand.GetSign() << operand.GetOffsetRegister()
            << ImmediateShiftOperand(operand.GetShift(),
                                     operand.GetShiftAmount());
    } else {
      VIXL_UNREACHABLE();
      return *this;
    }
    if (operand.GetAddrMode() == Offset) {
      *this << "]";
    } else if (operand.GetAddrMode() == PreIndex) {
      *this << "]!";
    }
    return *this;
  }
  virtual DisassemblerStream& operator<<(const PrintMemOperand& operand) {
    return *this << operand.GetOperand();
  }
  // Prints an aligned memory operand: "[rn@align]" optionally followed by
  // a post-index register or write-back marker.
  virtual DisassemblerStream& operator<<(const AlignedMemOperand& operand) {
    *this << "[" << operand.GetBaseRegister() << operand.GetAlignment()
          << "]";
    if (operand.GetAddrMode() == PostIndex) {
      if (operand.IsPlainRegister()) {
        *this << ", " << operand.GetOffsetRegister();
      } else {
        *this << "!";
      }
    }
    return *this;
  }
  virtual DisassemblerStream& operator<<(
      const PrintAlignedMemOperand& operand) {
    return *this << operand.GetOperand();
  }
};

private:
 // RAII helper: on destruction, advances the IT block state iff the
 // disassembler was inside an IT block when the scope was entered.
 class ITBlockScope {
   ITBlock* const it_block_;
   bool inside_;

  public:
   explicit ITBlockScope(ITBlock* it_block)
       : it_block_(it_block), inside_(it_block->InITBlock()) {}
   ~ITBlockScope() {
     if (inside_) it_block_->Advance();
   }
 };

 // State of the current T32 IT block, if any.
 ITBlock it_block_;
 // Output stream; owned by this object only when owns_os_ is true.
 DisassemblerStream* os_;
 bool owns_os_;
 // Address associated with the instruction being disassembled.
 uint32_t code_address_;
 // True if the disassembler always output instructions with all the
 // registers (even if two registers are identical and only one could be
 // output).
 bool use_short_hand_form_;

public:
 // Wraps a raw std::ostream in a DisassemblerStream that this object owns
 // (and deletes in the destructor).
 explicit Disassembler(std::ostream& os,  // NOLINT(runtime/references)
                       uint32_t code_address = 0)
     : os_(new DisassemblerStream(os)),
       owns_os_(true),
       code_address_(code_address),
       use_short_hand_form_(true) {}
 // Borrows a caller-supplied stream; the caller retains ownership.
 explicit Disassembler(DisassemblerStream* os, uint32_t code_address = 0)
     : os_(os),
       owns_os_(false),
       code_address_(code_address),
       use_short_hand_form_(true) {}
 virtual ~Disassembler() {
   if (owns_os_) {
     delete os_;
   }
 }
 DisassemblerStream& os() const { return *os_; }
 // Records the condition and mask of an IT instruction, which conditions
 // the following instructions.
 void SetIT(Condition first_condition, uint16_t it_mask) {
   it_block_.Set(first_condition, it_mask);
 }
 const ITBlock& GetITBlock() const { return it_block_; }
 bool InITBlock() const { return it_block_.InITBlock(); }
 bool OutsideITBlock() const { return it_block_.OutsideITBlock(); }
 bool OutsideITBlockOrLast() const { return it_block_.OutsideITBlockOrLast(); }
 // Asserts that the disassembler is not currently inside an IT block.
 void CheckNotIT() const { VIXL_ASSERT(it_block_.OutsideITBlock()); }
 // Return the current condition depending on the IT state for T32.
+ Condition CurrentCond() const { + if (it_block_.OutsideITBlock()) return al; + return it_block_.GetCurrentCondition(); + } + bool UseShortHandForm() const { return use_short_hand_form_; } + void SetUseShortHandForm(bool use_short_hand_form) { + use_short_hand_form_ = use_short_hand_form; + } + + virtual void UnallocatedT32(uint32_t instruction) { + if (T32Size(instruction) == 2) { + os() << "unallocated " << std::hex << std::setw(4) << std::setfill('0') + << (instruction >> 16) << std::dec; + } else { + os() << "unallocated " << std::hex << std::setw(8) << std::setfill('0') + << instruction << std::dec; + } + } + virtual void UnallocatedA32(uint32_t instruction) { + os() << "unallocated " << std::hex << std::setw(8) << std::setfill('0') + << instruction << std::dec; + } + virtual void UnimplementedT32_16(const char* name, uint32_t instruction) { + os() << "unimplemented " << name << " T32:" << std::hex << std::setw(4) + << std::setfill('0') << (instruction >> 16) << std::dec; + } + virtual void UnimplementedT32_32(const char* name, uint32_t instruction) { + os() << "unimplemented " << name << " T32:" << std::hex << std::setw(8) + << std::setfill('0') << instruction << std::dec; + } + virtual void UnimplementedA32(const char* name, uint32_t instruction) { + os() << "unimplemented " << name << " ARM:" << std::hex << std::setw(8) + << std::setfill('0') << instruction << std::dec; + } + virtual void Unpredictable() { os() << " ; unpredictable"; } + virtual void UnpredictableT32(uint32_t /*instr*/) { return Unpredictable(); } + virtual void UnpredictableA32(uint32_t /*instr*/) { return Unpredictable(); } + + static bool Is16BitEncoding(uint32_t instr) { return instr < 0xe8000000; } + uint32_t GetCodeAddress() const { return code_address_; } + void SetCodeAddress(uint32_t code_address) { code_address_ = code_address; } + + // Start of generated code. 
+ + void adc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void adcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void add(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void add(Condition cond, Register rd, const Operand& operand); + + void adds(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void adds(Register rd, const Operand& operand); + + void addw(Condition cond, Register rd, Register rn, const Operand& operand); + + void adr(Condition cond, EncodingSize size, Register rd, Location* location); + + void and_(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void ands(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void asr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void asrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void b(Condition cond, EncodingSize size, Location* location); + + void bfc(Condition cond, Register rd, uint32_t lsb, uint32_t width); + + void bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + + void bic(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void bics(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void bkpt(Condition cond, uint32_t imm); + + void bl(Condition cond, Location* location); + + void blx(Condition cond, Location* location); + + void blx(Condition cond, Register rm); + + void bx(Condition cond, Register rm); + + void bxj(Condition cond, Register rm); + + void cbnz(Register rn, Location* location); + + void cbz(Register rn, Location* location); + + void clrex(Condition cond); + + void 
clz(Condition cond, Register rd, Register rm); + + void cmn(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + + void cmp(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + + void crc32b(Condition cond, Register rd, Register rn, Register rm); + + void crc32cb(Condition cond, Register rd, Register rn, Register rm); + + void crc32ch(Condition cond, Register rd, Register rn, Register rm); + + void crc32cw(Condition cond, Register rd, Register rn, Register rm); + + void crc32h(Condition cond, Register rd, Register rn, Register rm); + + void crc32w(Condition cond, Register rd, Register rn, Register rm); + + void dmb(Condition cond, MemoryBarrier option); + + void dsb(Condition cond, MemoryBarrier option); + + void eor(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void eors(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void fstmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void hlt(Condition cond, uint32_t imm); + + void hvc(Condition cond, uint32_t imm); + + void isb(Condition cond, MemoryBarrier option); + + void it(Condition cond, uint16_t mask); + + void lda(Condition cond, Register rt, const MemOperand& operand); + + void ldab(Condition cond, Register rt, const MemOperand& operand); + + void ldaex(Condition cond, Register rt, const MemOperand& operand); + + void ldaexb(Condition cond, Register rt, const MemOperand& operand); + + void ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void ldaexh(Condition cond, Register rt, const 
MemOperand& operand); + + void ldah(Condition cond, Register rt, const MemOperand& operand); + + void ldm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmfd(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void ldr(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldr(Condition cond, EncodingSize size, Register rt, Location* location); + + void ldrb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldrb(Condition cond, Register rt, Location* location); + + void ldrd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void ldrd(Condition cond, Register rt, Register rt2, Location* location); + + void ldrex(Condition cond, Register rt, const MemOperand& operand); + + void ldrexb(Condition cond, Register rt, const MemOperand& operand); + + void ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void ldrexh(Condition cond, Register rt, const MemOperand& operand); + + void ldrh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldrh(Condition cond, Register rt, Location* location); + + void ldrsb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void 
ldrsb(Condition cond, Register rt, Location* location); + + void ldrsh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void ldrsh(Condition cond, Register rt, Location* location); + + void lsl(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void lsls(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void lsr(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void lsrs(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void mla(Condition cond, Register rd, Register rn, Register rm, Register ra); + + void mlas(Condition cond, Register rd, Register rn, Register rm, Register ra); + + void mls(Condition cond, Register rd, Register rn, Register rm, Register ra); + + void mov(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void movs(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void movt(Condition cond, Register rd, const Operand& operand); + + void movw(Condition cond, Register rd, const Operand& operand); + + void mrs(Condition cond, Register rd, SpecialRegister spec_reg); + + void msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand); + + void mul( + Condition cond, EncodingSize size, Register rd, Register rn, Register rm); + + void muls(Condition cond, Register rd, Register rn, Register rm); + + void mvn(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void mvns(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void nop(Condition cond, EncodingSize size); + + void orn(Condition cond, Register rd, Register rn, const Operand& operand); + + void orns(Condition cond, Register rd, Register rn, const Operand& operand); + + void orr(Condition cond, + EncodingSize size, + Register rd, + 
Register rn, + const Operand& operand); + + void orrs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void pkhbt(Condition cond, Register rd, Register rn, const Operand& operand); + + void pkhtb(Condition cond, Register rd, Register rn, const Operand& operand); + + void pld(Condition cond, Location* location); + + void pld(Condition cond, const MemOperand& operand); + + void pldw(Condition cond, const MemOperand& operand); + + void pli(Condition cond, const MemOperand& operand); + + void pli(Condition cond, Location* location); + + void pop(Condition cond, EncodingSize size, RegisterList registers); + + void pop(Condition cond, EncodingSize size, Register rt); + + void push(Condition cond, EncodingSize size, RegisterList registers); + + void push(Condition cond, EncodingSize size, Register rt); + + void qadd(Condition cond, Register rd, Register rm, Register rn); + + void qadd16(Condition cond, Register rd, Register rn, Register rm); + + void qadd8(Condition cond, Register rd, Register rn, Register rm); + + void qasx(Condition cond, Register rd, Register rn, Register rm); + + void qdadd(Condition cond, Register rd, Register rm, Register rn); + + void qdsub(Condition cond, Register rd, Register rm, Register rn); + + void qsax(Condition cond, Register rd, Register rn, Register rm); + + void qsub(Condition cond, Register rd, Register rm, Register rn); + + void qsub16(Condition cond, Register rd, Register rn, Register rm); + + void qsub8(Condition cond, Register rd, Register rn, Register rm); + + void rbit(Condition cond, Register rd, Register rm); + + void rev(Condition cond, EncodingSize size, Register rd, Register rm); + + void rev16(Condition cond, EncodingSize size, Register rd, Register rm); + + void revsh(Condition cond, EncodingSize size, Register rd, Register rm); + + void ror(Condition cond, + EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void rors(Condition cond, + 
EncodingSize size, + Register rd, + Register rm, + const Operand& operand); + + void rrx(Condition cond, Register rd, Register rm); + + void rrxs(Condition cond, Register rd, Register rm); + + void rsb(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void rsbs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void rsc(Condition cond, Register rd, Register rn, const Operand& operand); + + void rscs(Condition cond, Register rd, Register rn, const Operand& operand); + + void sadd16(Condition cond, Register rd, Register rn, Register rm); + + void sadd8(Condition cond, Register rd, Register rn, Register rm); + + void sasx(Condition cond, Register rd, Register rn, Register rm); + + void sbc(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void sbcs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + + void sdiv(Condition cond, Register rd, Register rn, Register rm); + + void sel(Condition cond, Register rd, Register rn, Register rm); + + void shadd16(Condition cond, Register rd, Register rn, Register rm); + + void shadd8(Condition cond, Register rd, Register rn, Register rm); + + void shasx(Condition cond, Register rd, Register rn, Register rm); + + void shsax(Condition cond, Register rd, Register rn, Register rm); + + void shsub16(Condition cond, Register rd, Register rn, Register rm); + + void shsub8(Condition cond, Register rd, Register rn, Register rm); + + void smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smladx( + Condition cond, Register rd, Register rn, Register rm, Register 
ra); + + void smlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smlsldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void smmul(Condition cond, Register rd, Register rn, Register rm); + + void smmulr(Condition cond, Register rd, Register rn, Register rm); + + void smuad(Condition cond, Register rd, 
Register rn, Register rm); + + void smuadx(Condition cond, Register rd, Register rn, Register rm); + + void smulbb(Condition cond, Register rd, Register rn, Register rm); + + void smulbt(Condition cond, Register rd, Register rn, Register rm); + + void smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void smultb(Condition cond, Register rd, Register rn, Register rm); + + void smultt(Condition cond, Register rd, Register rn, Register rm); + + void smulwb(Condition cond, Register rd, Register rn, Register rm); + + void smulwt(Condition cond, Register rd, Register rn, Register rm); + + void smusd(Condition cond, Register rd, Register rn, Register rm); + + void smusdx(Condition cond, Register rd, Register rn, Register rm); + + void ssat(Condition cond, Register rd, uint32_t imm, const Operand& operand); + + void ssat16(Condition cond, Register rd, uint32_t imm, Register rn); + + void ssax(Condition cond, Register rd, Register rn, Register rm); + + void ssub16(Condition cond, Register rd, Register rn, Register rm); + + void ssub8(Condition cond, Register rd, Register rn, Register rm); + + void stl(Condition cond, Register rt, const MemOperand& operand); + + void stlb(Condition cond, Register rt, const MemOperand& operand); + + void stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + + void stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void stlh(Condition cond, Register rt, const MemOperand& operand); + + void stm(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmda(Condition cond, + Register rn, + WriteBack 
write_back, + RegisterList registers); + + void stmdb(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmea(Condition cond, + EncodingSize size, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers); + + void str(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void strb(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand); + + void strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand); + + void strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand); + + void strh(Condition cond, + EncodingSize size, + Register rt, + const MemOperand& operand); + + void sub(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void sub(Condition cond, Register rd, const Operand& operand); + + void subs(Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand); + + void subs(Register rd, const Operand& operand); + + void subw(Condition cond, Register rd, Register rn, const Operand& operand); + + void svc(Condition cond, uint32_t imm); + + void sxtab(Condition cond, Register rd, Register rn, const Operand& operand); + + void sxtab16(Condition cond, + 
Register rd, + Register rn, + const Operand& operand); + + void sxtah(Condition cond, Register rd, Register rn, const Operand& operand); + + void sxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void sxtb16(Condition cond, Register rd, const Operand& operand); + + void sxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void tbb(Condition cond, Register rn, Register rm); + + void tbh(Condition cond, Register rn, Register rm); + + void teq(Condition cond, Register rn, const Operand& operand); + + void tst(Condition cond, + EncodingSize size, + Register rn, + const Operand& operand); + + void uadd16(Condition cond, Register rd, Register rn, Register rm); + + void uadd8(Condition cond, Register rd, Register rn, Register rm); + + void uasx(Condition cond, Register rd, Register rn, Register rm); + + void ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width); + + void udf(Condition cond, EncodingSize size, uint32_t imm); + + void udiv(Condition cond, Register rd, Register rn, Register rm); + + void uhadd16(Condition cond, Register rd, Register rn, Register rm); + + void uhadd8(Condition cond, Register rd, Register rn, Register rm); + + void uhasx(Condition cond, Register rd, Register rn, Register rm); + + void uhsax(Condition cond, Register rd, Register rn, Register rm); + + void uhsub16(Condition cond, Register rd, Register rn, Register rm); + + void uhsub8(Condition cond, Register rd, Register rn, Register rm); + + void umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm); + + void 
uqadd16(Condition cond, Register rd, Register rn, Register rm); + + void uqadd8(Condition cond, Register rd, Register rn, Register rm); + + void uqasx(Condition cond, Register rd, Register rn, Register rm); + + void uqsax(Condition cond, Register rd, Register rn, Register rm); + + void uqsub16(Condition cond, Register rd, Register rn, Register rm); + + void uqsub8(Condition cond, Register rd, Register rn, Register rm); + + void usad8(Condition cond, Register rd, Register rn, Register rm); + + void usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra); + + void usat(Condition cond, Register rd, uint32_t imm, const Operand& operand); + + void usat16(Condition cond, Register rd, uint32_t imm, Register rn); + + void usax(Condition cond, Register rd, Register rn, Register rm); + + void usub16(Condition cond, Register rd, Register rn, Register rm); + + void usub8(Condition cond, Register rd, Register rn, Register rm); + + void uxtab(Condition cond, Register rd, Register rn, const Operand& operand); + + void uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand); + + void uxtah(Condition cond, Register rd, Register rn, const Operand& operand); + + void uxtb(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void uxtb16(Condition cond, Register rd, const Operand& operand); + + void uxth(Condition cond, + EncodingSize size, + Register rd, + const Operand& operand); + + void vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + 
void vabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vabs(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vabs(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + + void vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vbif( + 
Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vceq(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void 
vcls(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vcls(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vclz(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vclz(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vcmp(Condition cond, DataType dt, SRegister rd, const SOperand& operand); + + void vcmp(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + + void vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand); + + void vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand); + + void vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits); + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits); + + void vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType 
dt2, QRegister rd, DRegister rm); + + void vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + + void vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm); + + void vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm); + + void vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm); + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm); + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm); + + void vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister 
rm); + + void vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vdup(Condition cond, DataType dt, QRegister rd, Register rt); + + void vdup(Condition cond, DataType dt, DRegister rd, Register rt); + + void vdup(Condition cond, DataType dt, DRegister rd, DRegisterLane rm); + + void vdup(Condition cond, DataType dt, QRegister rd, DRegisterLane rm); + + void veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vext(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand); + + void vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand); + + void vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vfms( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vfms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vhsub( + Condition 
cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + + void vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vldr(Condition cond, DataType dt, DRegister rd, Location* location); + + void vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + + void vldr(Condition cond, DataType dt, SRegister rd, Location* location); + + void vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + + void vmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + 
+ void vmaxnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmla(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmla(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vmls(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmls(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + + void vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vmov(Condition cond, Register rt, SRegister rn); + + void vmov(Condition cond, SRegister rn, Register rt); + + void vmov(Condition cond, Register rt, 
Register rt2, DRegister rm); + + void vmov(Condition cond, DRegister rm, Register rt, Register rt2); + + void vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1); + + void vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2); + + void vmov(Condition cond, DataType dt, DRegisterLane rd, Register rt); + + void vmov(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + + void vmov(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + + void vmov(Condition cond, DataType dt, SRegister rd, const SOperand& operand); + + void vmov(Condition cond, DataType dt, Register rt, DRegisterLane rn); + + void vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm); + + void vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + + void vmrs(Condition cond, RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg); + + void vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt); + + void vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index); + + void vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vmvn(Condition cond, DataType dt, DRegister rd, const DOperand& operand); + + void vmvn(Condition cond, DataType dt, QRegister rd, const QOperand& operand); + + void vneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void 
vneg(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand); + + void vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand); + + void vpadal(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vpadal(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vpaddl(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vpaddl(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vpop(Condition cond, DataType dt, DRegisterList dreglist); + + void vpop(Condition cond, DataType dt, SRegisterList sreglist); + + void vpush(Condition cond, DataType dt, DRegisterList dreglist); + + void vpush(Condition cond, DataType dt, SRegisterList sreglist); + + void vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vqabs(Condition cond, DataType dt, QRegister rd, 
QRegister rm); + + void vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index); + + void vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vqdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vqdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vqdmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm); + + void vqmovn(Condition cond, DataType dt, DRegister rd, QRegister rm); + + void vqmovun(Condition cond, DataType dt, DRegister rd, QRegister rm); + + void vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vqrdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm); + + void vqrdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm); + + void vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + + void vqrshl( + Condition cond, 
DataType dt, QRegister rd, QRegister rm, QRegister rn); + + void vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vrecpe(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrecpe(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vrev16(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrev16(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrev32(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrev32(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrev64(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrev64(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrhadd( + Condition cond, 
DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vrinta(DataType dt, DRegister rd, DRegister rm); + + void vrinta(DataType dt, QRegister rd, QRegister rm); + + void vrinta(DataType dt, SRegister rd, SRegister rm); + + void vrintm(DataType dt, DRegister rd, DRegister rm); + + void vrintm(DataType dt, QRegister rd, QRegister rm); + + void vrintm(DataType dt, SRegister rd, SRegister rm); + + void vrintn(DataType dt, DRegister rd, DRegister rm); + + void vrintn(DataType dt, QRegister rd, QRegister rm); + + void vrintn(DataType dt, SRegister rd, SRegister rm); + + void vrintp(DataType dt, DRegister rd, DRegister rm); + + void vrintp(DataType dt, QRegister rd, QRegister rm); + + void vrintp(DataType dt, SRegister rd, SRegister rm); + + void vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrintx(DataType dt, QRegister rd, QRegister rm); + + void vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vrintz(DataType dt, QRegister rd, QRegister rm); + + void vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm); + + void vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn); + + void vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn); + + void vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vrsqrte(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void 
vrsqrte(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselge(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand); + + void vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand); + + void vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vsqrt(Condition 
cond, DataType dt, SRegister rd, SRegister rm); + + void vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand); + + void vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand); + + void vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand); + + void vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand); + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist); + + void vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist); + + void vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand); + + void vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand); + + void vsub( + 
Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm); + + void vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm); + + void vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm); + + void vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm); + + void vswp(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vswp(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + + void vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm); + + void vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm); + + void vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm); + + void vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void vzip(Condition cond, DataType dt, DRegister rd, DRegister rm); + + void vzip(Condition cond, DataType dt, QRegister rd, QRegister rm); + + void yield(Condition cond, EncodingSize size); + + int T32Size(uint32_t instr); + void DecodeT32(uint32_t instr); + void DecodeA32(uint32_t instr); +}; + +DataTypeValue Dt_L_imm6_1_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_L_imm6_2_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_L_imm6_3_Decode(uint32_t value); +DataTypeValue Dt_L_imm6_4_Decode(uint32_t value); +DataTypeValue Dt_imm6_1_Decode(uint32_t value, uint32_t type_value); +DataTypeValue 
Dt_imm6_2_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_imm6_3_Decode(uint32_t value); +DataTypeValue Dt_imm6_4_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_op_U_size_1_Decode(uint32_t value); +DataTypeValue Dt_op_size_1_Decode(uint32_t value); +DataTypeValue Dt_op_size_2_Decode(uint32_t value); +DataTypeValue Dt_op_size_3_Decode(uint32_t value); +DataTypeValue Dt_U_imm3H_1_Decode(uint32_t value); +DataTypeValue Dt_U_opc1_opc2_1_Decode(uint32_t value, unsigned* lane); +DataTypeValue Dt_opc1_opc2_1_Decode(uint32_t value, unsigned* lane); +DataTypeValue Dt_imm4_1_Decode(uint32_t value, unsigned* lane); +DataTypeValue Dt_B_E_1_Decode(uint32_t value); +DataTypeValue Dt_op_1_Decode1(uint32_t value); +DataTypeValue Dt_op_1_Decode2(uint32_t value); +DataTypeValue Dt_op_2_Decode(uint32_t value); +DataTypeValue Dt_op_3_Decode(uint32_t value); +DataTypeValue Dt_U_sx_1_Decode(uint32_t value); +DataTypeValue Dt_op_U_1_Decode1(uint32_t value); +DataTypeValue Dt_op_U_1_Decode2(uint32_t value); +DataTypeValue Dt_sz_1_Decode(uint32_t value); +DataTypeValue Dt_F_size_1_Decode(uint32_t value); +DataTypeValue Dt_F_size_2_Decode(uint32_t value); +DataTypeValue Dt_F_size_3_Decode(uint32_t value); +DataTypeValue Dt_F_size_4_Decode(uint32_t value); +DataTypeValue Dt_U_size_1_Decode(uint32_t value); +DataTypeValue Dt_U_size_2_Decode(uint32_t value); +DataTypeValue Dt_U_size_3_Decode(uint32_t value); +DataTypeValue Dt_size_1_Decode(uint32_t value); +DataTypeValue Dt_size_2_Decode(uint32_t value); +DataTypeValue Dt_size_3_Decode(uint32_t value); +DataTypeValue Dt_size_4_Decode(uint32_t value); +DataTypeValue Dt_size_5_Decode(uint32_t value); +DataTypeValue Dt_size_6_Decode(uint32_t value); +DataTypeValue Dt_size_7_Decode(uint32_t value); +DataTypeValue Dt_size_8_Decode(uint32_t value); +DataTypeValue Dt_size_9_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_size_10_Decode(uint32_t value); +DataTypeValue Dt_size_11_Decode(uint32_t value, 
uint32_t type_value); +DataTypeValue Dt_size_12_Decode(uint32_t value, uint32_t type_value); +DataTypeValue Dt_size_13_Decode(uint32_t value); +DataTypeValue Dt_size_14_Decode(uint32_t value); +DataTypeValue Dt_size_15_Decode(uint32_t value); +DataTypeValue Dt_size_16_Decode(uint32_t value); +DataTypeValue Dt_size_17_Decode(uint32_t value); +// End of generated code. + + /* PrintDisassembler: Disassembler subclass that writes each decoded instruction to a stream; the inline helpers below format the address and raw opcode columns before the mnemonic. */ +class PrintDisassembler : public Disassembler { + public: + explicit PrintDisassembler(std::ostream& os, // NOLINT(runtime/references) + uint32_t code_address = 0) + : Disassembler(os, code_address) {} + explicit PrintDisassembler(DisassemblerStream* os, uint32_t code_address = 0) + : Disassembler(os, code_address) {} + + /* Prints the instruction address as "0x" + 8 zero-padded hex digits, followed by a tab. */ + virtual void PrintCodeAddress(uint32_t code_address) { + os() << "0x" << std::hex << std::setw(8) << std::setfill('0') + << code_address << "\t"; + } + + /* Prints a 16-bit opcode as 4 zero-padded hex digits (note: stream is reset to decimal afterwards). */ + virtual void PrintOpcode16(uint32_t opcode) { + os() << std::hex << std::setw(4) << std::setfill('0') << opcode << " " + << std::dec << "\t"; + } + + /* Prints a 32-bit opcode as 8 zero-padded hex digits, then restores decimal output. */ + virtual void PrintOpcode32(uint32_t opcode) { + os() << std::hex << std::setw(8) << std::setfill('0') << opcode << std::dec + << "\t"; + } + + /* Decodes one fixed-width 4-byte A32 instruction and returns the address of the next one. */ + const uint32_t* DecodeA32At(const uint32_t* instruction_address) { + DecodeA32(*instruction_address); + return instruction_address + 1; + } + + // Returns the address of the next instruction. 
+ /* T32 instructions are 2 or 4 bytes, hence the buffer_end bound — presumably to avoid reading a second halfword past the buffer; confirm in the .cc definition. */ + const uint16_t* DecodeT32At(const uint16_t* instruction_address, + const uint16_t* buffer_end); + void DecodeT32(uint32_t instruction); + void DecodeA32(uint32_t instruction); + void DisassembleA32Buffer(const uint32_t* buffer, size_t size_in_bytes); + void DisassembleT32Buffer(const uint16_t* buffer, size_t size_in_bytes); +}; + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_DISASM_AARCH32_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.cc new file mode 100644 index 00000000..2d1cb905 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.cc @@ -0,0 +1,742 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +extern "C" { +#include +} + +#include +#include +#include +#include +#include + +#include "utils-vixl.h" +#include "aarch32/constants-aarch32.h" +#include "aarch32/instructions-aarch32.h" + +namespace vixl { +namespace aarch32 { + + +bool Shift::IsValidAmount(uint32_t amount) const { + switch (GetType()) { + case LSL: + return amount <= 31; + case ROR: + return (amount > 0) && (amount <= 31); + case LSR: + case ASR: + return (amount > 0) && (amount <= 32); + case RRX: + return amount == 0; + default: + VIXL_UNREACHABLE(); + return false; + } +} + + +std::ostream& operator<<(std::ostream& os, const Register reg) { + switch (reg.GetCode()) { + case 12: + return os << "ip"; + case 13: + return os << "sp"; + case 14: + return os << "lr"; + case 15: + return os << "pc"; + default: + return os << "r" << reg.GetCode(); + } +} + + +SRegister VRegister::S() const { + VIXL_ASSERT(GetType() == kSRegister); + return SRegister(GetCode()); +} + + +DRegister VRegister::D() const { + VIXL_ASSERT(GetType() == kDRegister); + return DRegister(GetCode()); +} + + +QRegister VRegister::Q() const { + VIXL_ASSERT(GetType() == kQRegister); + return QRegister(GetCode()); +} + + +Register RegisterList::GetFirstAvailableRegister() const { + for (uint32_t i = 0; i < kNumberOfRegisters; i++) { + if (((list_ >> i) & 1) != 0) return Register(i); + } + return Register(); +} + + +std::ostream& PrintRegisterList(std::ostream& os, // NOLINT(runtime/references) + 
uint32_t list) { + os << "{"; + bool first = true; + int code = 0; + while (list != 0) { + if ((list & 1) != 0) { + if (first) { + first = false; + } else { + os << ","; + } + os << Register(code); + } + list >>= 1; + code++; + } + os << "}"; + return os; +} + + +std::ostream& operator<<(std::ostream& os, RegisterList registers) { + return PrintRegisterList(os, registers.GetList()); +} + + +QRegister VRegisterList::GetFirstAvailableQRegister() const { + for (uint32_t i = 0; i < kNumberOfQRegisters; i++) { + if (((list_ >> (i * 4)) & 0xf) == 0xf) return QRegister(i); + } + return QRegister(); +} + + +DRegister VRegisterList::GetFirstAvailableDRegister() const { + for (uint32_t i = 0; i < kMaxNumberOfDRegisters; i++) { + if (((list_ >> (i * 2)) & 0x3) == 0x3) return DRegister(i); + } + return DRegister(); +} + + +SRegister VRegisterList::GetFirstAvailableSRegister() const { + for (uint32_t i = 0; i < kNumberOfSRegisters; i++) { + if (((list_ >> i) & 0x1) != 0) return SRegister(i); + } + return SRegister(); +} + + +std::ostream& operator<<(std::ostream& os, SRegisterList reglist) { + SRegister first = reglist.GetFirstSRegister(); + SRegister last = reglist.GetLastSRegister(); + if (first.Is(last)) + os << "{" << first << "}"; + else + os << "{" << first << "-" << last << "}"; + return os; +} + + +std::ostream& operator<<(std::ostream& os, DRegisterList reglist) { + DRegister first = reglist.GetFirstDRegister(); + DRegister last = reglist.GetLastDRegister(); + if (first.Is(last)) + os << "{" << first << "}"; + else + os << "{" << first << "-" << last << "}"; + return os; +} + +std::ostream& operator<<(std::ostream& os, NeonRegisterList nreglist) { + DRegister first = nreglist.GetFirstDRegister(); + int increment = nreglist.IsSingleSpaced() ? 
1 : 2; + int count = + nreglist.GetLastDRegister().GetCode() - first.GetCode() + increment; + if (count < 0) count += kMaxNumberOfDRegisters; + os << "{"; + bool first_displayed = false; + for (;;) { + if (first_displayed) { + os << ","; + } else { + first_displayed = true; + } + os << first; + if (nreglist.IsTransferOneLane()) { + os << "[" << nreglist.GetTransferLane() << "]"; + } else if (nreglist.IsTransferAllLanes()) { + os << "[]"; + } + count -= increment; + if (count <= 0) break; + unsigned next = first.GetCode() + increment; + if (next >= kMaxNumberOfDRegisters) next -= kMaxNumberOfDRegisters; + first = DRegister(next); + } + os << "}"; + return os; +} + + +const char* SpecialRegister::GetName() const { + switch (reg_) { + case APSR: + return "APSR"; + case SPSR: + return "SPSR"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* MaskedSpecialRegister::GetName() const { + switch (reg_) { + case APSR_nzcvq: + return "APSR_nzcvq"; + case APSR_g: + return "APSR_g"; + case APSR_nzcvqg: + return "APSR_nzcvqg"; + case CPSR_c: + return "CPSR_c"; + case CPSR_x: + return "CPSR_x"; + case CPSR_xc: + return "CPSR_xc"; + case CPSR_sc: + return "CPSR_sc"; + case CPSR_sx: + return "CPSR_sx"; + case CPSR_sxc: + return "CPSR_sxc"; + case CPSR_fc: + return "CPSR_fc"; + case CPSR_fx: + return "CPSR_fx"; + case CPSR_fxc: + return "CPSR_fxc"; + case CPSR_fsc: + return "CPSR_fsc"; + case CPSR_fsx: + return "CPSR_fsx"; + case CPSR_fsxc: + return "CPSR_fsxc"; + case SPSR_c: + return "SPSR_c"; + case SPSR_x: + return "SPSR_x"; + case SPSR_xc: + return "SPSR_xc"; + case SPSR_s: + return "SPSR_s"; + case SPSR_sc: + return "SPSR_sc"; + case SPSR_sx: + return "SPSR_sx"; + case SPSR_sxc: + return "SPSR_sxc"; + case SPSR_f: + return "SPSR_f"; + case SPSR_fc: + return "SPSR_fc"; + case SPSR_fx: + return "SPSR_fx"; + case SPSR_fxc: + return "SPSR_fxc"; + case SPSR_fs: + return "SPSR_fs"; + case SPSR_fsc: + return "SPSR_fsc"; + case SPSR_fsx: + return "SPSR_fsx"; + case 
SPSR_fsxc: + return "SPSR_fsxc"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* BankedRegister::GetName() const { + switch (reg_) { + case R8_usr: + return "R8_usr"; + case R9_usr: + return "R9_usr"; + case R10_usr: + return "R10_usr"; + case R11_usr: + return "R11_usr"; + case R12_usr: + return "R12_usr"; + case SP_usr: + return "SP_usr"; + case LR_usr: + return "LR_usr"; + case R8_fiq: + return "R8_fiq"; + case R9_fiq: + return "R9_fiq"; + case R10_fiq: + return "R10_fiq"; + case R11_fiq: + return "R11_fiq"; + case R12_fiq: + return "R12_fiq"; + case SP_fiq: + return "SP_fiq"; + case LR_fiq: + return "LR_fiq"; + case LR_irq: + return "LR_irq"; + case SP_irq: + return "SP_irq"; + case LR_svc: + return "LR_svc"; + case SP_svc: + return "SP_svc"; + case LR_abt: + return "LR_abt"; + case SP_abt: + return "SP_abt"; + case LR_und: + return "LR_und"; + case SP_und: + return "SP_und"; + case LR_mon: + return "LR_mon"; + case SP_mon: + return "SP_mon"; + case ELR_hyp: + return "ELR_hyp"; + case SP_hyp: + return "SP_hyp"; + case SPSR_fiq: + return "SPSR_fiq"; + case SPSR_irq: + return "SPSR_irq"; + case SPSR_svc: + return "SPSR_svc"; + case SPSR_abt: + return "SPSR_abt"; + case SPSR_und: + return "SPSR_und"; + case SPSR_mon: + return "SPSR_mon"; + case SPSR_hyp: + return "SPSR_hyp"; + } + VIXL_UNREACHABLE(); + return "??"; +} + +const char* SpecialFPRegister::GetName() const { + switch (reg_) { + case FPSID: + return "FPSID"; + case FPSCR: + return "FPSCR"; + case MVFR2: + return "MVFR2"; + case MVFR1: + return "MVFR1"; + case MVFR0: + return "MVFR0"; + case FPEXC: + return "FPEXC"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* Condition::GetName() const { + switch (condition_) { + case eq: + return "eq"; + case ne: + return "ne"; + case cs: + return "cs"; + case cc: + return "cc"; + case mi: + return "mi"; + case pl: + return "pl"; + case vs: + return "vs"; + case vc: + return "vc"; + case hi: + return "hi"; + case ls: + return "ls"; + case 
ge: + return "ge"; + case lt: + return "lt"; + case gt: + return "gt"; + case le: + return "le"; + case al: + return ""; + case Condition::kNone: + return ""; + } + return ""; +} + + +const char* Shift::GetName() const { + switch (shift_) { + case LSL: + return "lsl"; + case LSR: + return "lsr"; + case ASR: + return "asr"; + case ROR: + return "ror"; + case RRX: + return "rrx"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* EncodingSize::GetName() const { + switch (size_) { + case Best: + case Narrow: + return ""; + case Wide: + return ".w"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +const char* DataType::GetName() const { + switch (value_) { + case kDataTypeValueInvalid: + return ".??"; + case kDataTypeValueNone: + return ""; + case S8: + return ".s8"; + case S16: + return ".s16"; + case S32: + return ".s32"; + case S64: + return ".s64"; + case U8: + return ".u8"; + case U16: + return ".u16"; + case U32: + return ".u32"; + case U64: + return ".u64"; + case F16: + return ".f16"; + case F32: + return ".f32"; + case F64: + return ".f64"; + case I8: + return ".i8"; + case I16: + return ".i16"; + case I32: + return ".i32"; + case I64: + return ".i64"; + case P8: + return ".p8"; + case P64: + return ".p64"; + case Untyped8: + return ".8"; + case Untyped16: + return ".16"; + case Untyped32: + return ".32"; + case Untyped64: + return ".64"; + } + VIXL_UNREACHABLE(); + return ".??"; +} + + +const char* MemoryBarrier::GetName() const { + switch (type_) { + case OSHLD: + return "oshld"; + case OSHST: + return "oshst"; + case OSH: + return "osh"; + case NSHLD: + return "nshld"; + case NSHST: + return "nshst"; + case NSH: + return "nsh"; + case ISHLD: + return "ishld"; + case ISHST: + return "ishst"; + case ISH: + return "ish"; + case LD: + return "ld"; + case ST: + return "st"; + case SY: + return "sy"; + } + switch (static_cast(type_)) { + case 0: + return "#0x0"; + case 4: + return "#0x4"; + case 8: + return "#0x8"; + case 0xc: + return "#0xc"; + } + 
VIXL_UNREACHABLE(); + return "??"; +} + + +const char* InterruptFlags::GetName() const { + switch (type_) { + case F: + return "f"; + case I: + return "i"; + case IF: + return "if"; + case A: + return "a"; + case AF: + return "af"; + case AI: + return "ai"; + case AIF: + return "aif"; + } + VIXL_ASSERT(type_ == 0); + return ""; +} + + +const char* Endianness::GetName() const { + switch (type_) { + case LE: + return "le"; + case BE: + return "be"; + } + VIXL_UNREACHABLE(); + return "??"; +} + + +// Constructor used for disassembly. +ImmediateShiftOperand::ImmediateShiftOperand(int shift_value, int amount_value) + : Shift(shift_value) { + switch (shift_value) { + case LSL: + amount_ = amount_value; + break; + case LSR: + case ASR: + amount_ = (amount_value == 0) ? 32 : amount_value; + break; + case ROR: + amount_ = amount_value; + if (amount_value == 0) SetType(RRX); + break; + default: + VIXL_UNREACHABLE(); + SetType(LSL); + amount_ = 0; + break; + } +} + + +ImmediateT32::ImmediateT32(uint32_t imm) { + // 00000000 00000000 00000000 abcdefgh + if ((imm & ~0xff) == 0) { + SetEncodingValue(imm); + return; + } + if ((imm >> 16) == (imm & 0xffff)) { + if ((imm & 0xff00) == 0) { + // 00000000 abcdefgh 00000000 abcdefgh + SetEncodingValue((imm & 0xff) | (0x1 << 8)); + return; + } + if ((imm & 0xff) == 0) { + // abcdefgh 00000000 abcdefgh 00000000 + SetEncodingValue(((imm >> 8) & 0xff) | (0x2 << 8)); + return; + } + if (((imm >> 8) & 0xff) == (imm & 0xff)) { + // abcdefgh abcdefgh abcdefgh abcdefgh + SetEncodingValue((imm & 0xff) | (0x3 << 8)); + return; + } + } + for (int shift = 0; shift < 24; shift++) { + uint32_t imm8 = imm >> (24 - shift); + uint32_t overflow = imm << (8 + shift); + if ((imm8 <= 0xff) && ((imm8 & 0x80) != 0) && (overflow == 0)) { + SetEncodingValue(((shift + 8) << 7) | (imm8 & 0x7F)); + return; + } + } +} + + +static inline uint32_t ror(uint32_t x, int i) { + VIXL_ASSERT((0 < i) && (i < 32)); + return (x >> i) | (x << (32 - i)); +} + + +bool 
ImmediateT32::IsImmediateT32(uint32_t imm) { + /* abcdefgh abcdefgh abcdefgh abcdefgh */ + if ((imm ^ ror(imm, 8)) == 0) return true; + /* 00000000 abcdefgh 00000000 abcdefgh */ + /* abcdefgh 00000000 abcdefgh 00000000 */ + if ((imm ^ ror(imm, 16)) == 0 && + (((imm & 0xff00) == 0) || ((imm & 0xff) == 0))) + return true; + /* isolate least-significant set bit */ + uint32_t lsb = imm & -imm; + /* if imm is less than lsb*256 then it fits, but instead we test imm/256 to + * avoid overflow (underflow is always a successful case) */ + return ((imm >> 8) < lsb); +} + + +uint32_t ImmediateT32::Decode(uint32_t value) { + uint32_t base = value & 0xff; + switch (value >> 8) { + case 0: + return base; + case 1: + return base | (base << 16); + case 2: + return (base << 8) | (base << 24); + case 3: + return base | (base << 8) | (base << 16) | (base << 24); + default: + base |= 0x80; + return base << (32 - (value >> 7)); + } +} + + +ImmediateA32::ImmediateA32(uint32_t imm) { + // Deal with rot = 0 first to avoid undefined shift by 32. 
+ if (imm <= 0xff) { + SetEncodingValue(imm); + return; + } + for (int rot = 2; rot < 32; rot += 2) { + uint32_t imm8 = (imm << rot) | (imm >> (32 - rot)); + if (imm8 <= 0xff) { + SetEncodingValue((rot << 7) | imm8); + return; + } + } +} + + +bool ImmediateA32::IsImmediateA32(uint32_t imm) { + /* fast-out */ + if (imm < 256) return true; + /* avoid getting confused by wrapped-around bytes (this transform has no + * effect on pass/fail results) */ + if (imm & 0xff000000) imm = ror(imm, 16); + /* copy odd-numbered set bits into even-numbered bits immediately below, so + * that the least-significant set bit is always an even bit */ + imm = imm | ((imm >> 1) & 0x55555555); + /* isolate least-significant set bit (always even) */ + uint32_t lsb = imm & -imm; + /* if imm is less than lsb*256 then it fits, but instead we test imm/256 to + * avoid overflow (underflow is always a successful case) */ + return ((imm >> 8) < lsb); +} + + +uint32_t ImmediateA32::Decode(uint32_t value) { + int rotation = (value >> 8) * 2; + VIXL_ASSERT(rotation >= 0); + VIXL_ASSERT(rotation <= 30); + value &= 0xff; + if (rotation == 0) return value; + return (value >> rotation) | (value << (32 - rotation)); +} + + +uint32_t TypeEncodingValue(Shift shift) { + return shift.IsRRX() ? kRRXEncodedValue : shift.GetValue(); +} + + +uint32_t AmountEncodingValue(Shift shift, uint32_t amount) { + switch (shift.GetType()) { + case LSL: + case ROR: + return amount; + case LSR: + case ASR: + return amount % 32; + case RRX: + return 0; + } + return 0; +} + +} // namespace aarch32 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.h new file mode 100644 index 00000000..f11f2b02 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/instructions-aarch32.h @@ -0,0 +1,1359 @@ +// Copyright 2017, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_ +#define VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_ + +extern "C" { +#include +} + +#include +#include + +#include "code-buffer-vixl.h" +#include "utils-vixl.h" +#include "aarch32/constants-aarch32.h" + +#ifdef __arm__ +#define HARDFLOAT __attribute__((noinline, pcs("aapcs-vfp"))) +#else +#define HARDFLOAT __attribute__((noinline)) +#endif + +namespace vixl { +namespace aarch32 { + +class Operand; +class SOperand; +class DOperand; +class QOperand; +class MemOperand; +class AlignedMemOperand; + +enum AddrMode { Offset = 0, PreIndex = 1, PostIndex = 2 }; + +class CPURegister { + public: + enum RegisterType { + kNoRegister = 0, + kRRegister = 1, + kSRegister = 2, + kDRegister = 3, + kQRegister = 4 + }; + + private: + static const int kCodeBits = 5; + static const int kTypeBits = 4; + static const int kSizeBits = 8; + static const int kCodeShift = 0; + static const int kTypeShift = kCodeShift + kCodeBits; + static const int kSizeShift = kTypeShift + kTypeBits; + static const uint32_t kCodeMask = ((1 << kCodeBits) - 1) << kCodeShift; + static const uint32_t kTypeMask = ((1 << kTypeBits) - 1) << kTypeShift; + static const uint32_t kSizeMask = ((1 << kSizeBits) - 1) << kSizeShift; + uint32_t value_; + + public: + CPURegister(RegisterType type, uint32_t code, int size) + : value_((type << kTypeShift) | (code << kCodeShift) | + (size << kSizeShift)) { +#ifdef VIXL_DEBUG + switch (type) { + case kNoRegister: + break; + case kRRegister: + VIXL_ASSERT(code < kNumberOfRegisters); + VIXL_ASSERT(size == kRegSizeInBits); + break; + case kSRegister: + VIXL_ASSERT(code < kNumberOfSRegisters); + VIXL_ASSERT(size == kSRegSizeInBits); + break; + case kDRegister: + VIXL_ASSERT(code < kMaxNumberOfDRegisters); + VIXL_ASSERT(size == kDRegSizeInBits); + break; + case kQRegister: + VIXL_ASSERT(code < kNumberOfQRegisters); + VIXL_ASSERT(size == kQRegSizeInBits); + break; + default: + VIXL_UNREACHABLE(); + break; + } +#endif + } + RegisterType 
GetType() const { + return static_cast((value_ & kTypeMask) >> kTypeShift); + } + bool IsRegister() const { return GetType() == kRRegister; } + bool IsS() const { return GetType() == kSRegister; } + bool IsD() const { return GetType() == kDRegister; } + bool IsQ() const { return GetType() == kQRegister; } + bool IsVRegister() const { return IsS() || IsD() || IsQ(); } + bool IsFPRegister() const { return IsS() || IsD(); } + uint32_t GetCode() const { return (value_ & kCodeMask) >> kCodeShift; } + uint32_t GetReg() const { return value_; } + int GetSizeInBits() const { return (value_ & kSizeMask) >> kSizeShift; } + int GetRegSizeInBytes() const { + return (GetType() == kNoRegister) ? 0 : (GetSizeInBits() / 8); + } + bool Is64Bits() const { return GetSizeInBits() == 64; } + bool Is128Bits() const { return GetSizeInBits() == 128; } + bool IsSameFormat(CPURegister reg) { + return (value_ & ~kCodeMask) == (reg.value_ & ~kCodeMask); + } + bool Is(CPURegister ref) const { return GetReg() == ref.GetReg(); } + bool IsValid() const { return GetType() != kNoRegister; } +}; + +class Register : public CPURegister { + public: + Register() : CPURegister(kNoRegister, 0, kRegSizeInBits) {} + explicit Register(uint32_t code) + : CPURegister(kRRegister, code % kNumberOfRegisters, kRegSizeInBits) { + VIXL_ASSERT(GetCode() < kNumberOfRegisters); + } + bool Is(Register ref) const { return GetCode() == ref.GetCode(); } + bool IsLow() const { return GetCode() < kNumberOfT32LowRegisters; } + bool IsLR() const { return GetCode() == kLrCode; } + bool IsPC() const { return GetCode() == kPcCode; } + bool IsSP() const { return GetCode() == kSpCode; } +}; + +std::ostream& operator<<(std::ostream& os, const Register reg); + +class RegisterOrAPSR_nzcv { + uint32_t code_; + + public: + explicit RegisterOrAPSR_nzcv(uint32_t code) : code_(code) { + VIXL_ASSERT(code_ < kNumberOfRegisters); + } + bool IsAPSR_nzcv() const { return code_ == kPcCode; } + uint32_t GetCode() const { return code_; } + 
Register AsRegister() const { + VIXL_ASSERT(!IsAPSR_nzcv()); + return Register(code_); + } +}; + +const RegisterOrAPSR_nzcv APSR_nzcv(kPcCode); + +inline std::ostream& operator<<(std::ostream& os, + const RegisterOrAPSR_nzcv reg) { + if (reg.IsAPSR_nzcv()) return os << "APSR_nzcv"; + return os << reg.AsRegister(); +} + +class SRegister; +class DRegister; +class QRegister; + +class VRegister : public CPURegister { + public: + VRegister() : CPURegister(kNoRegister, 0, 0) {} + VRegister(RegisterType type, uint32_t code, int size) + : CPURegister(type, code, size) {} + + SRegister S() const; + DRegister D() const; + QRegister Q() const; +}; + +class SRegister : public VRegister { + public: + SRegister() : VRegister(kNoRegister, 0, kSRegSizeInBits) {} + explicit SRegister(uint32_t code) + : VRegister(kSRegister, code, kSRegSizeInBits) {} + uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) const { + if (four_bit_field_lowest_bit == 0) { + return ((GetCode() & 0x1) << single_bit_field) | + ((GetCode() & 0x1e) >> 1); + } + return ((GetCode() & 0x1) << single_bit_field) | + ((GetCode() & 0x1e) << (four_bit_field_lowest_bit - 1)); + } +}; + +inline unsigned ExtractSRegister(uint32_t instr, + int single_bit_field, + int four_bit_field_lowest_bit) { + VIXL_ASSERT(single_bit_field > 0); + if (four_bit_field_lowest_bit == 0) { + return ((instr << 1) & 0x1e) | ((instr >> single_bit_field) & 0x1); + } + return ((instr >> (four_bit_field_lowest_bit - 1)) & 0x1e) | + ((instr >> single_bit_field) & 0x1); +} + +inline std::ostream& operator<<(std::ostream& os, const SRegister reg) { + return os << "s" << reg.GetCode(); +} + +class DRegister : public VRegister { + public: + DRegister() : VRegister(kNoRegister, 0, kDRegSizeInBits) {} + explicit DRegister(uint32_t code) + : VRegister(kDRegister, code, kDRegSizeInBits) {} + SRegister GetLane(uint32_t lane) const { + uint32_t lane_count = kDRegSizeInBits / kSRegSizeInBits; + VIXL_ASSERT(lane < lane_count); + 
VIXL_ASSERT(GetCode() * lane_count < kNumberOfSRegisters); + return SRegister(GetCode() * lane_count + lane); + } + uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) const { + VIXL_ASSERT(single_bit_field >= 4); + return ((GetCode() & 0x10) << (single_bit_field - 4)) | + ((GetCode() & 0xf) << four_bit_field_lowest_bit); + } +}; + +inline unsigned ExtractDRegister(uint32_t instr, + int single_bit_field, + int four_bit_field_lowest_bit) { + VIXL_ASSERT(single_bit_field >= 4); + return ((instr >> (single_bit_field - 4)) & 0x10) | + ((instr >> four_bit_field_lowest_bit) & 0xf); +} + +inline std::ostream& operator<<(std::ostream& os, const DRegister reg) { + return os << "d" << reg.GetCode(); +} + +enum DataTypeType { + kDataTypeS = 0x100, + kDataTypeU = 0x200, + kDataTypeF = 0x300, + kDataTypeI = 0x400, + kDataTypeP = 0x500, + kDataTypeUntyped = 0x600 +}; +const int kDataTypeSizeMask = 0x0ff; +const int kDataTypeTypeMask = 0x100; +enum DataTypeValue { + kDataTypeValueInvalid = 0x000, + kDataTypeValueNone = 0x001, // value used when dt is ignored. + S8 = kDataTypeS | 8, + S16 = kDataTypeS | 16, + S32 = kDataTypeS | 32, + S64 = kDataTypeS | 64, + U8 = kDataTypeU | 8, + U16 = kDataTypeU | 16, + U32 = kDataTypeU | 32, + U64 = kDataTypeU | 64, + F16 = kDataTypeF | 16, + F32 = kDataTypeF | 32, + F64 = kDataTypeF | 64, + I8 = kDataTypeI | 8, + I16 = kDataTypeI | 16, + I32 = kDataTypeI | 32, + I64 = kDataTypeI | 64, + P8 = kDataTypeP | 8, + P64 = kDataTypeP | 64, + Untyped8 = kDataTypeUntyped | 8, + Untyped16 = kDataTypeUntyped | 16, + Untyped32 = kDataTypeUntyped | 32, + Untyped64 = kDataTypeUntyped | 64 +}; + +class DataType { + DataTypeValue value_; + + public: + explicit DataType(uint32_t size) + : value_(static_cast(kDataTypeUntyped | size)) { + VIXL_ASSERT((size == 8) || (size == 16) || (size == 32) || (size == 64)); + } + // Users should be able to use "S8", "S6" and so forth to instantiate this + // class. 
+ DataType(DataTypeValue value) : value_(value) {} // NOLINT(runtime/explicit) + DataTypeValue GetValue() const { return value_; } + DataTypeType GetType() const { + return static_cast(value_ & kDataTypeTypeMask); + } + uint32_t GetSize() const { return value_ & kDataTypeSizeMask; } + bool IsSize(uint32_t size) const { + return (value_ & kDataTypeSizeMask) == size; + } + const char* GetName() const; + bool Is(DataType type) const { return value_ == type.value_; } + bool Is(DataTypeValue value) const { return value_ == value; } + bool Is(DataTypeType type) const { return GetType() == type; } + bool IsNoneOr(DataTypeValue value) const { + return (value_ == value) || (value_ == kDataTypeValueNone); + } + bool Is(DataTypeType type, uint32_t size) const { + return value_ == static_cast(type | size); + } + bool IsNoneOr(DataTypeType type, uint32_t size) const { + return Is(type, size) || Is(kDataTypeValueNone); + } +}; + +inline std::ostream& operator<<(std::ostream& os, DataType dt) { + return os << dt.GetName(); +} + +class DRegisterLane : public DRegister { + uint32_t lane_; + + public: + DRegisterLane(DRegister reg, uint32_t lane) + : DRegister(reg.GetCode()), lane_(lane) {} + DRegisterLane(uint32_t code, uint32_t lane) : DRegister(code), lane_(lane) {} + uint32_t GetLane() const { return lane_; } + uint32_t EncodeX(DataType dt, + int single_bit_field, + int four_bit_field_lowest_bit) const { + VIXL_ASSERT(single_bit_field >= 4); + uint32_t value = lane_ << ((dt.GetSize() == 16) ? 
3 : 4) | GetCode(); + return ((value & 0x10) << (single_bit_field - 4)) | + ((value & 0xf) << four_bit_field_lowest_bit); + } +}; + +inline unsigned ExtractDRegisterAndLane(uint32_t instr, + DataType dt, + int single_bit_field, + int four_bit_field_lowest_bit, + int* lane) { + VIXL_ASSERT(single_bit_field >= 4); + uint32_t value = ((instr >> (single_bit_field - 4)) & 0x10) | + ((instr >> four_bit_field_lowest_bit) & 0xf); + if (dt.GetSize() == 16) { + *lane = value >> 3; + return value & 0x7; + } + *lane = value >> 4; + return value & 0xf; +} + +inline std::ostream& operator<<(std::ostream& os, const DRegisterLane lane) { + os << "d" << lane.GetCode() << "["; + if (lane.GetLane() == static_cast(-1)) return os << "??]"; + return os << lane.GetLane() << "]"; +} + +class QRegister : public VRegister { + public: + QRegister() : VRegister(kNoRegister, 0, kQRegSizeInBits) {} + explicit QRegister(uint32_t code) + : VRegister(kQRegister, code, kQRegSizeInBits) {} + uint32_t Encode(int offset) { return GetCode() << offset; } + DRegister GetDLane(uint32_t lane) const { + uint32_t lane_count = kQRegSizeInBits / kDRegSizeInBits; + VIXL_ASSERT(lane < lane_count); + return DRegister(GetCode() * lane_count + lane); + } + DRegister GetLowDRegister() const { return DRegister(GetCode() * 2); } + DRegister GetHighDRegister() const { return DRegister(1 + GetCode() * 2); } + SRegister GetSLane(uint32_t lane) const { + uint32_t lane_count = kQRegSizeInBits / kSRegSizeInBits; + VIXL_ASSERT(lane < lane_count); + VIXL_ASSERT(GetCode() * lane_count < kNumberOfSRegisters); + return SRegister(GetCode() * lane_count + lane); + } + uint32_t Encode(int single_bit_field, int four_bit_field_lowest_bit) { + // Encode "code * 2". 
+ VIXL_ASSERT(single_bit_field >= 3); + return ((GetCode() & 0x8) << (single_bit_field - 3)) | + ((GetCode() & 0x7) << (four_bit_field_lowest_bit + 1)); + } +}; + +inline unsigned ExtractQRegister(uint32_t instr, + int single_bit_field, + int four_bit_field_lowest_bit) { + VIXL_ASSERT(single_bit_field >= 3); + return ((instr >> (single_bit_field - 3)) & 0x8) | + ((instr >> (four_bit_field_lowest_bit + 1)) & 0x7); +} + +inline std::ostream& operator<<(std::ostream& os, const QRegister reg) { + return os << "q" << reg.GetCode(); +} + +// clang-format off +#define AARCH32_REGISTER_CODE_LIST(R) \ + R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ + R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) +// clang-format on +#define DEFINE_REGISTER(N) const Register r##N(N); +AARCH32_REGISTER_CODE_LIST(DEFINE_REGISTER) +#undef DEFINE_REGISTER +#undef AARCH32_REGISTER_CODE_LIST + +enum RegNum { kIPRegNum = 12, kSPRegNum = 13, kLRRegNum = 14, kPCRegNum = 15 }; + +const Register ip(kIPRegNum); +const Register sp(kSPRegNum); +const Register pc(kPCRegNum); +const Register lr(kLRRegNum); +const Register NoReg; +const VRegister NoVReg; + +// clang-format off +#define SREGISTER_CODE_LIST(R) \ + R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ + R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ + R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ + R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) +// clang-format on +#define DEFINE_REGISTER(N) const SRegister s##N(N); +SREGISTER_CODE_LIST(DEFINE_REGISTER) +#undef DEFINE_REGISTER +#undef SREGISTER_CODE_LIST +const SRegister NoSReg; + +// clang-format off +#define DREGISTER_CODE_LIST(R) \ +R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ +R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ +R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ +R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) +// clang-format on +#define DEFINE_REGISTER(N) const DRegister d##N(N); +DREGISTER_CODE_LIST(DEFINE_REGISTER) +#undef DEFINE_REGISTER +#undef DREGISTER_CODE_LIST +const DRegister 
NoDReg; + +// clang-format off +#define QREGISTER_CODE_LIST(R) \ + R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ + R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) +// clang-format on +#define DEFINE_REGISTER(N) const QRegister q##N(N); +QREGISTER_CODE_LIST(DEFINE_REGISTER) +#undef DEFINE_REGISTER +#undef QREGISTER_CODE_LIST +const QRegister NoQReg; + +class RegisterList { + public: + RegisterList() : list_(0) {} + RegisterList(Register reg) // NOLINT(runtime/explicit) + : list_(RegisterToList(reg)) {} + RegisterList(Register reg1, Register reg2) + : list_(RegisterToList(reg1) | RegisterToList(reg2)) {} + RegisterList(Register reg1, Register reg2, Register reg3) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3)) {} + RegisterList(Register reg1, Register reg2, Register reg3, Register reg4) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3) | RegisterToList(reg4)) {} + explicit RegisterList(uint32_t list) : list_(list) {} + uint32_t GetList() const { return list_; } + void SetList(uint32_t list) { list_ = list; } + bool Includes(const Register& reg) const { + return (list_ & RegisterToList(reg)) != 0; + } + void Combine(const RegisterList& other) { list_ |= other.GetList(); } + void Combine(const Register& reg) { list_ |= RegisterToList(reg); } + void Remove(const RegisterList& other) { list_ &= ~other.GetList(); } + void Remove(const Register& reg) { list_ &= ~RegisterToList(reg); } + bool Overlaps(const RegisterList& other) const { + return (list_ & other.list_) != 0; + } + bool IsR0toR7orPC() const { + // True if all the registers from the list are not from r8-r14. + return (list_ & 0x7f00) == 0; + } + bool IsR0toR7orLR() const { + // True if all the registers from the list are not from r8-r13 nor from r15. 
+ return (list_ & 0xbf00) == 0; + } + Register GetFirstAvailableRegister() const; + bool IsEmpty() const { return list_ == 0; } + static RegisterList Union(const RegisterList& list_1, + const RegisterList& list_2) { + return RegisterList(list_1.list_ | list_2.list_); + } + static RegisterList Union(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3) { + return Union(list_1, Union(list_2, list_3)); + } + static RegisterList Union(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3, + const RegisterList& list_4) { + return Union(Union(list_1, list_2), Union(list_3, list_4)); + } + static RegisterList Intersection(const RegisterList& list_1, + const RegisterList& list_2) { + return RegisterList(list_1.list_ & list_2.list_); + } + static RegisterList Intersection(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3) { + return Intersection(list_1, Intersection(list_2, list_3)); + } + static RegisterList Intersection(const RegisterList& list_1, + const RegisterList& list_2, + const RegisterList& list_3, + const RegisterList& list_4) { + return Intersection(Intersection(list_1, list_2), + Intersection(list_3, list_4)); + } + + private: + static uint32_t RegisterToList(Register reg) { + if (reg.GetType() == CPURegister::kNoRegister) { + return 0; + } else { + return UINT32_C(1) << reg.GetCode(); + } + } + + // Bitfield representation of all registers in the list + // (1 for r0, 2 for r1, 4 for r2, ...). 
+ uint32_t list_; +}; + +inline uint32_t GetRegisterListEncoding(const RegisterList& registers, + int first, + int count) { + return (registers.GetList() >> first) & ((1 << count) - 1); +} + +std::ostream& operator<<(std::ostream& os, RegisterList registers); + +class VRegisterList { + public: + VRegisterList() : list_(0) {} + explicit VRegisterList(VRegister reg) : list_(RegisterToList(reg)) {} + VRegisterList(VRegister reg1, VRegister reg2) + : list_(RegisterToList(reg1) | RegisterToList(reg2)) {} + VRegisterList(VRegister reg1, VRegister reg2, VRegister reg3) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3)) {} + VRegisterList(VRegister reg1, VRegister reg2, VRegister reg3, VRegister reg4) + : list_(RegisterToList(reg1) | RegisterToList(reg2) | + RegisterToList(reg3) | RegisterToList(reg4)) {} + explicit VRegisterList(uint64_t list) : list_(list) {} + uint64_t GetList() const { return list_; } + void SetList(uint64_t list) { list_ = list; } + // Because differently-sized V registers overlap with one another, there is no + // way to implement a single 'Includes' function in a way that is unsurprising + // for all existing uses. 
+ bool IncludesAllOf(const VRegister& reg) const { + return (list_ & RegisterToList(reg)) == RegisterToList(reg); + } + bool IncludesAliasOf(const VRegister& reg) const { + return (list_ & RegisterToList(reg)) != 0; + } + void Combine(const VRegisterList& other) { list_ |= other.GetList(); } + void Combine(const VRegister& reg) { list_ |= RegisterToList(reg); } + void Remove(const VRegisterList& other) { list_ &= ~other.GetList(); } + void Remove(const VRegister& reg) { list_ &= ~RegisterToList(reg); } + bool Overlaps(const VRegisterList& other) const { + return (list_ & other.list_) != 0; + } + QRegister GetFirstAvailableQRegister() const; + DRegister GetFirstAvailableDRegister() const; + SRegister GetFirstAvailableSRegister() const; + bool IsEmpty() const { return list_ == 0; } + static VRegisterList Union(const VRegisterList& list_1, + const VRegisterList& list_2) { + return VRegisterList(list_1.list_ | list_2.list_); + } + static VRegisterList Union(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3) { + return Union(list_1, Union(list_2, list_3)); + } + static VRegisterList Union(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3, + const VRegisterList& list_4) { + return Union(Union(list_1, list_2), Union(list_3, list_4)); + } + static VRegisterList Intersection(const VRegisterList& list_1, + const VRegisterList& list_2) { + return VRegisterList(list_1.list_ & list_2.list_); + } + static VRegisterList Intersection(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3) { + return Intersection(list_1, Intersection(list_2, list_3)); + } + static VRegisterList Intersection(const VRegisterList& list_1, + const VRegisterList& list_2, + const VRegisterList& list_3, + const VRegisterList& list_4) { + return Intersection(Intersection(list_1, list_2), + Intersection(list_3, list_4)); + } + + private: + static uint64_t RegisterToList(VRegister reg) { + if 
(reg.GetType() == CPURegister::kNoRegister) { + return 0; + } else { + switch (reg.GetSizeInBits()) { + case kQRegSizeInBits: + return UINT64_C(0xf) << (reg.GetCode() * 4); + case kDRegSizeInBits: + return UINT64_C(0x3) << (reg.GetCode() * 2); + case kSRegSizeInBits: + return UINT64_C(0x1) << reg.GetCode(); + default: + VIXL_UNREACHABLE(); + return 0; + } + } + } + + // Bitfield representation of all registers in the list. + // (0x3 for d0, 0xc0 for d1, 0x30 for d2, ...). We have one, two or four bits + // per register according to their size. This way we can make sure that we + // account for overlapping registers. + // A register is wholly included in this list only if all of its bits are set. + // A register is aliased by the list if at least one of its bits are set. + // The IncludesAllOf and IncludesAliasOf helpers are provided to make this + // distinction clear. + uint64_t list_; +}; + +class SRegisterList { + SRegister first_; + int length_; + + public: + explicit SRegisterList(SRegister reg) : first_(reg.GetCode()), length_(1) {} + SRegisterList(SRegister first, int length) + : first_(first.GetCode()), length_(length) { + VIXL_ASSERT(length >= 0); + } + SRegister GetSRegister(int n) const { + VIXL_ASSERT(n >= 0); + VIXL_ASSERT(n < length_); + return SRegister((first_.GetCode() + n) % kNumberOfSRegisters); + } + const SRegister& GetFirstSRegister() const { return first_; } + SRegister GetLastSRegister() const { return GetSRegister(length_ - 1); } + int GetLength() const { return length_; } +}; + +std::ostream& operator<<(std::ostream& os, SRegisterList registers); + +class DRegisterList { + DRegister first_; + int length_; + + public: + explicit DRegisterList(DRegister reg) : first_(reg.GetCode()), length_(1) {} + DRegisterList(DRegister first, int length) + : first_(first.GetCode()), length_(length) { + VIXL_ASSERT(length >= 0); + } + DRegister GetDRegister(int n) const { + VIXL_ASSERT(n >= 0); + VIXL_ASSERT(n < length_); + return 
DRegister((first_.GetCode() + n) % kMaxNumberOfDRegisters); + } + const DRegister& GetFirstDRegister() const { return first_; } + DRegister GetLastDRegister() const { return GetDRegister(length_ - 1); } + int GetLength() const { return length_; } +}; + +std::ostream& operator<<(std::ostream& os, DRegisterList registers); + +enum SpacingType { kSingle, kDouble }; + +enum TransferType { kMultipleLanes, kOneLane, kAllLanes }; + +class NeonRegisterList { + DRegister first_; + SpacingType spacing_; + TransferType type_; + int lane_; + int length_; + + public: + NeonRegisterList(DRegister reg, TransferType type) + : first_(reg.GetCode()), + spacing_(kSingle), + type_(type), + lane_(-1), + length_(1) { + VIXL_ASSERT(type_ != kOneLane); + } + NeonRegisterList(DRegister reg, int lane) + : first_(reg.GetCode()), + spacing_(kSingle), + type_(kOneLane), + lane_(lane), + length_(1) { + VIXL_ASSERT((lane_ >= 0) && (lane_ < 8)); + } + NeonRegisterList(DRegister first, + DRegister last, + SpacingType spacing, + TransferType type) + : first_(first.GetCode()), spacing_(spacing), type_(type), lane_(-1) { + VIXL_ASSERT(type != kOneLane); + VIXL_ASSERT(first.GetCode() <= last.GetCode()); + + int range = last.GetCode() - first.GetCode(); + VIXL_ASSERT(IsSingleSpaced() || IsMultiple(range, 2)); + length_ = (IsDoubleSpaced() ? (range / 2) : range) + 1; + + VIXL_ASSERT(length_ <= 4); + } + NeonRegisterList(DRegister first, + DRegister last, + SpacingType spacing, + int lane) + : first_(first.GetCode()), + spacing_(spacing), + type_(kOneLane), + lane_(lane) { + VIXL_ASSERT((lane >= 0) && (lane < 8)); + VIXL_ASSERT(first.GetCode() <= last.GetCode()); + + int range = last.GetCode() - first.GetCode(); + VIXL_ASSERT(IsSingleSpaced() || IsMultiple(range, 2)); + length_ = (IsDoubleSpaced() ? (range / 2) : range) + 1; + + VIXL_ASSERT(length_ <= 4); + } + DRegister GetDRegister(int n) const { + VIXL_ASSERT(n >= 0); + VIXL_ASSERT(n < length_); + unsigned code = first_.GetCode() + (IsDoubleSpaced() ? 
(2 * n) : n); + VIXL_ASSERT(code < kMaxNumberOfDRegisters); + return DRegister(code); + } + const DRegister& GetFirstDRegister() const { return first_; } + DRegister GetLastDRegister() const { return GetDRegister(length_ - 1); } + int GetLength() const { return length_; } + bool IsSingleSpaced() const { return spacing_ == kSingle; } + bool IsDoubleSpaced() const { return spacing_ == kDouble; } + bool IsTransferAllLanes() const { return type_ == kAllLanes; } + bool IsTransferOneLane() const { return type_ == kOneLane; } + bool IsTransferMultipleLanes() const { return type_ == kMultipleLanes; } + int GetTransferLane() const { return lane_; } +}; + +std::ostream& operator<<(std::ostream& os, NeonRegisterList registers); + +enum SpecialRegisterType { APSR = 0, CPSR = 0, SPSR = 1 }; + +class SpecialRegister { + uint32_t reg_; + + public: + explicit SpecialRegister(uint32_t reg) : reg_(reg) {} + SpecialRegister(SpecialRegisterType reg) // NOLINT(runtime/explicit) + : reg_(reg) {} + uint32_t GetReg() const { return reg_; } + const char* GetName() const; + bool Is(SpecialRegister value) const { return reg_ == value.reg_; } + bool Is(uint32_t value) const { return reg_ == value; } + bool IsNot(uint32_t value) const { return reg_ != value; } +}; + +inline std::ostream& operator<<(std::ostream& os, SpecialRegister reg) { + return os << reg.GetName(); +} + +enum BankedRegisterType { + R8_usr = 0x00, + R9_usr = 0x01, + R10_usr = 0x02, + R11_usr = 0x03, + R12_usr = 0x04, + SP_usr = 0x05, + LR_usr = 0x06, + R8_fiq = 0x08, + R9_fiq = 0x09, + R10_fiq = 0x0a, + R11_fiq = 0x0b, + R12_fiq = 0x0c, + SP_fiq = 0x0d, + LR_fiq = 0x0e, + LR_irq = 0x10, + SP_irq = 0x11, + LR_svc = 0x12, + SP_svc = 0x13, + LR_abt = 0x14, + SP_abt = 0x15, + LR_und = 0x16, + SP_und = 0x17, + LR_mon = 0x1c, + SP_mon = 0x1d, + ELR_hyp = 0x1e, + SP_hyp = 0x1f, + SPSR_fiq = 0x2e, + SPSR_irq = 0x30, + SPSR_svc = 0x32, + SPSR_abt = 0x34, + SPSR_und = 0x36, + SPSR_mon = 0x3c, + SPSR_hyp = 0x3e +}; + +class 
BankedRegister { + uint32_t reg_; + + public: + explicit BankedRegister(unsigned reg) : reg_(reg) {} + BankedRegister(BankedRegisterType reg) // NOLINT(runtime/explicit) + : reg_(reg) {} + uint32_t GetCode() const { return reg_; } + const char* GetName() const; +}; + +inline std::ostream& operator<<(std::ostream& os, BankedRegister reg) { + return os << reg.GetName(); +} + +enum MaskedSpecialRegisterType { + APSR_nzcvq = 0x08, + APSR_g = 0x04, + APSR_nzcvqg = 0x0c, + CPSR_c = 0x01, + CPSR_x = 0x02, + CPSR_xc = 0x03, + CPSR_s = APSR_g, + CPSR_sc = 0x05, + CPSR_sx = 0x06, + CPSR_sxc = 0x07, + CPSR_f = APSR_nzcvq, + CPSR_fc = 0x09, + CPSR_fx = 0x0a, + CPSR_fxc = 0x0b, + CPSR_fs = APSR_nzcvqg, + CPSR_fsc = 0x0d, + CPSR_fsx = 0x0e, + CPSR_fsxc = 0x0f, + SPSR_c = 0x11, + SPSR_x = 0x12, + SPSR_xc = 0x13, + SPSR_s = 0x14, + SPSR_sc = 0x15, + SPSR_sx = 0x16, + SPSR_sxc = 0x17, + SPSR_f = 0x18, + SPSR_fc = 0x19, + SPSR_fx = 0x1a, + SPSR_fxc = 0x1b, + SPSR_fs = 0x1c, + SPSR_fsc = 0x1d, + SPSR_fsx = 0x1e, + SPSR_fsxc = 0x1f +}; + +class MaskedSpecialRegister { + uint32_t reg_; + + public: + explicit MaskedSpecialRegister(uint32_t reg) : reg_(reg) { + VIXL_ASSERT(reg <= SPSR_fsxc); + } + MaskedSpecialRegister( + MaskedSpecialRegisterType reg) // NOLINT(runtime/explicit) + : reg_(reg) {} + uint32_t GetReg() const { return reg_; } + const char* GetName() const; + bool Is(MaskedSpecialRegister value) const { return reg_ == value.reg_; } + bool Is(uint32_t value) const { return reg_ == value; } + bool IsNot(uint32_t value) const { return reg_ != value; } +}; + +inline std::ostream& operator<<(std::ostream& os, MaskedSpecialRegister reg) { + return os << reg.GetName(); +} + +enum SpecialFPRegisterType { + FPSID = 0x0, + FPSCR = 0x1, + MVFR2 = 0x5, + MVFR1 = 0x6, + MVFR0 = 0x7, + FPEXC = 0x8 +}; + +class SpecialFPRegister { + uint32_t reg_; + + public: + explicit SpecialFPRegister(uint32_t reg) : reg_(reg) { +#ifdef VIXL_DEBUG + switch (reg) { + case FPSID: + case FPSCR: + case 
MVFR2: + case MVFR1: + case MVFR0: + case FPEXC: + break; + default: + VIXL_UNREACHABLE(); + } +#endif + } + SpecialFPRegister(SpecialFPRegisterType reg) // NOLINT(runtime/explicit) + : reg_(reg) {} + uint32_t GetReg() const { return reg_; } + const char* GetName() const; + bool Is(SpecialFPRegister value) const { return reg_ == value.reg_; } + bool Is(uint32_t value) const { return reg_ == value; } + bool IsNot(uint32_t value) const { return reg_ != value; } +}; + +inline std::ostream& operator<<(std::ostream& os, SpecialFPRegister reg) { + return os << reg.GetName(); +} + +class CRegister { + uint32_t code_; + + public: + explicit CRegister(uint32_t code) : code_(code) { + VIXL_ASSERT(code < kNumberOfRegisters); + } + uint32_t GetCode() const { return code_; } + bool Is(CRegister value) const { return code_ == value.code_; } +}; + +inline std::ostream& operator<<(std::ostream& os, const CRegister reg) { + return os << "c" << reg.GetCode(); +} + +// clang-format off +#define CREGISTER_CODE_LIST(R) \ + R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ + R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) +// clang-format on +#define DEFINE_CREGISTER(N) const CRegister c##N(N); +CREGISTER_CODE_LIST(DEFINE_CREGISTER) + +enum CoprocessorName { p10 = 10, p11 = 11, p14 = 14, p15 = 15 }; + +class Coprocessor { + uint32_t coproc_; + + public: + explicit Coprocessor(uint32_t coproc) : coproc_(coproc) {} + Coprocessor(CoprocessorName coproc) // NOLINT(runtime/explicit) + : coproc_(static_cast(coproc)) {} + bool Is(Coprocessor coproc) const { return coproc_ == coproc.coproc_; } + bool Is(CoprocessorName coproc) const { return coproc_ == coproc; } + uint32_t GetCoprocessor() const { return coproc_; } +}; + +inline std::ostream& operator<<(std::ostream& os, Coprocessor coproc) { + return os << "p" << coproc.GetCoprocessor(); +} + +enum ConditionType { + eq = 0, + ne = 1, + cs = 2, + cc = 3, + mi = 4, + pl = 5, + vs = 6, + vc = 7, + hi = 8, + ls = 9, + ge = 10, + lt = 11, + gt = 12, + le = 
13, + al = 14, + hs = cs, + lo = cc +}; + +class Condition { + uint32_t condition_; + static const uint32_t kNever = 15; + static const uint32_t kMask = 0xf; + static const uint32_t kNone = 0x10 | al; + + public: + static const Condition None() { return Condition(kNone); } + static const Condition Never() { return Condition(kNever); } + explicit Condition(uint32_t condition) : condition_(condition) { + VIXL_ASSERT(condition <= kNone); + } + // Users should be able to use "eq", "ne" and so forth to instantiate this + // class. + Condition(ConditionType condition) // NOLINT(runtime/explicit) + : condition_(condition) {} + uint32_t GetCondition() const { return condition_ & kMask; } + bool IsNone() const { return condition_ == kNone; } + const char* GetName() const; + bool Is(Condition value) const { return condition_ == value.condition_; } + bool Is(uint32_t value) const { return condition_ == value; } + bool IsNot(uint32_t value) const { return condition_ != value; } + bool IsNever() const { return condition_ == kNever; } + bool IsNotNever() const { return condition_ != kNever; } + Condition Negate() const { + VIXL_ASSERT(IsNot(al) && IsNot(kNever)); + return Condition(condition_ ^ 1); + } +}; + +inline std::ostream& operator<<(std::ostream& os, Condition condition) { + return os << condition.GetName(); +} + +enum SignType { plus, minus }; + +class Sign { + public: + Sign() : sign_(plus) {} + Sign(SignType sign) : sign_(sign) {} // NOLINT(runtime/explicit) + const char* GetName() const { return (IsPlus() ? "" : "-"); } + bool IsPlus() const { return sign_ == plus; } + bool IsMinus() const { return sign_ == minus; } + int32_t ApplyTo(uint32_t value) { return IsPlus() ? 
value : -value; } + + private: + SignType sign_; +}; + +inline std::ostream& operator<<(std::ostream& os, Sign sign) { + return os << sign.GetName(); +} + +enum ShiftType { LSL = 0x0, LSR = 0x1, ASR = 0x2, ROR = 0x3, RRX = 0x4 }; + +class Shift { + public: + Shift() : shift_(LSL) {} + Shift(ShiftType shift) : shift_(shift) {} // NOLINT(runtime/explicit) + explicit Shift(uint32_t shift) : shift_(static_cast(shift)) {} + const Shift& GetShift() const { return *this; } + ShiftType GetType() const { return shift_; } + uint32_t GetValue() const { return shift_; } + const char* GetName() const; + bool IsLSL() const { return shift_ == LSL; } + bool IsLSR() const { return shift_ == LSR; } + bool IsASR() const { return shift_ == ASR; } + bool IsROR() const { return shift_ == ROR; } + bool IsRRX() const { return shift_ == RRX; } + bool Is(Shift value) const { return shift_ == value.shift_; } + bool IsNot(Shift value) const { return shift_ != value.shift_; } + bool IsValidAmount(uint32_t amount) const; + static const Shift NoShift; + + protected: + void SetType(ShiftType s) { shift_ = s; } + + private: + ShiftType shift_; +}; + +inline std::ostream& operator<<(std::ostream& os, Shift shift) { + return os << shift.GetName(); +} + +class ImmediateShiftOperand : public Shift { + public: + // Constructor used for assembly. + ImmediateShiftOperand(Shift shift, uint32_t amount) + : Shift(shift), amount_(amount) { +#ifdef VIXL_DEBUG + switch (shift.GetType()) { + case LSL: + VIXL_ASSERT(amount <= 31); + break; + case ROR: + VIXL_ASSERT(amount > 0); + VIXL_ASSERT(amount <= 31); + break; + case LSR: + case ASR: + VIXL_ASSERT(amount > 0); + VIXL_ASSERT(amount <= 32); + break; + case RRX: + VIXL_ASSERT(amount == 0); + break; + default: + VIXL_UNREACHABLE(); + break; + } +#endif + } + // Constructor used for disassembly. 
+ ImmediateShiftOperand(int shift, int amount); + uint32_t GetAmount() const { return amount_; } + bool Is(const ImmediateShiftOperand& rhs) const { + return amount_ == (rhs.amount_) && Shift::Is(*this); + } + + private: + uint32_t amount_; +}; + +inline std::ostream& operator<<(std::ostream& os, + ImmediateShiftOperand const& shift_operand) { + if (shift_operand.IsLSL() && shift_operand.GetAmount() == 0) return os; + if (shift_operand.IsRRX()) return os << ", rrx"; + return os << ", " << shift_operand.GetName() << " #" + << shift_operand.GetAmount(); +} + +class RegisterShiftOperand : public Shift { + public: + RegisterShiftOperand(ShiftType shift, Register shift_register) + : Shift(shift), shift_register_(shift_register) { + VIXL_ASSERT(!IsRRX() && shift_register_.IsValid()); + } + const Register GetShiftRegister() const { return shift_register_; } + bool Is(const RegisterShiftOperand& rhs) const { + return shift_register_.Is(rhs.shift_register_) && Shift::Is(*this); + } + + private: + Register shift_register_; +}; + +inline std::ostream& operator<<(std::ostream& s, + const RegisterShiftOperand& shift_operand) { + return s << shift_operand.GetName() << " " + << shift_operand.GetShiftRegister(); +} + +enum EncodingSizeType { Best, Narrow, Wide }; + +class EncodingSize { + uint32_t size_; + + public: + explicit EncodingSize(uint32_t size) : size_(size) {} + EncodingSize(EncodingSizeType size) // NOLINT(runtime/explicit) + : size_(size) {} + uint32_t GetSize() const { return size_; } + const char* GetName() const; + bool IsBest() const { return size_ == Best; } + bool IsNarrow() const { return size_ == Narrow; } + bool IsWide() const { return size_ == Wide; } +}; + +inline std::ostream& operator<<(std::ostream& os, EncodingSize size) { + return os << size.GetName(); +} + +enum WriteBackValue { NO_WRITE_BACK, WRITE_BACK }; + +class WriteBack { + WriteBackValue value_; + + public: + WriteBack(WriteBackValue value) // NOLINT(runtime/explicit) + : value_(value) {} + 
explicit WriteBack(int value) + : value_((value == 0) ? NO_WRITE_BACK : WRITE_BACK) {} + uint32_t GetWriteBackUint32() const { return (value_ == WRITE_BACK) ? 1 : 0; } + bool DoesWriteBack() const { return value_ == WRITE_BACK; } +}; + +inline std::ostream& operator<<(std::ostream& os, WriteBack write_back) { + if (write_back.DoesWriteBack()) return os << "!"; + return os; +} + +class EncodingValue { + bool valid_; + uint32_t encoding_value_; + + public: + EncodingValue() { + valid_ = false; + encoding_value_ = 0; + } + bool IsValid() const { return valid_; } + uint32_t GetEncodingValue() const { return encoding_value_; } + void SetEncodingValue(uint32_t encoding_value) { + valid_ = true; + encoding_value_ = encoding_value; + } +}; + +class EncodingValueAndImmediate : public EncodingValue { + uint32_t encoded_immediate_; + + public: + EncodingValueAndImmediate() { encoded_immediate_ = 0; } + uint32_t GetEncodedImmediate() const { return encoded_immediate_; } + void SetEncodedImmediate(uint32_t encoded_immediate) { + encoded_immediate_ = encoded_immediate; + } +}; + +class ImmediateT32 : public EncodingValue { + public: + explicit ImmediateT32(uint32_t imm); + static bool IsImmediateT32(uint32_t imm); + static uint32_t Decode(uint32_t value); +}; + +class ImmediateA32 : public EncodingValue { + public: + explicit ImmediateA32(uint32_t imm); + static bool IsImmediateA32(uint32_t imm); + static uint32_t Decode(uint32_t value); +}; + +// Return the encoding value of a shift type. +uint32_t TypeEncodingValue(Shift shift); +// Return the encoding value for a shift amount depending on the shift type. 
+uint32_t AmountEncodingValue(Shift shift, uint32_t amount); + +enum MemoryBarrierType { + OSHLD = 0x1, + OSHST = 0x2, + OSH = 0x3, + NSHLD = 0x5, + NSHST = 0x6, + NSH = 0x7, + ISHLD = 0x9, + ISHST = 0xa, + ISH = 0xb, + LD = 0xd, + ST = 0xe, + SY = 0xf +}; + +class MemoryBarrier { + MemoryBarrierType type_; + + public: + MemoryBarrier(MemoryBarrierType type) // NOLINT(runtime/explicit) + : type_(type) {} + MemoryBarrier(uint32_t type) // NOLINT(runtime/explicit) + : type_(static_cast(type)) { + VIXL_ASSERT((type & 0x3) != 0); + } + MemoryBarrierType GetType() const { return type_; } + const char* GetName() const; +}; + +inline std::ostream& operator<<(std::ostream& os, MemoryBarrier option) { + return os << option.GetName(); +} + +enum InterruptFlagsType { + F = 0x1, + I = 0x2, + IF = 0x3, + A = 0x4, + AF = 0x5, + AI = 0x6, + AIF = 0x7 +}; + +class InterruptFlags { + InterruptFlagsType type_; + + public: + InterruptFlags(InterruptFlagsType type) // NOLINT(runtime/explicit) + : type_(type) {} + InterruptFlags(uint32_t type) // NOLINT(runtime/explicit) + : type_(static_cast(type)) { + VIXL_ASSERT(type <= 7); + } + InterruptFlagsType GetType() const { return type_; } + const char* GetName() const; +}; + +inline std::ostream& operator<<(std::ostream& os, InterruptFlags option) { + return os << option.GetName(); +} + +enum EndiannessType { LE = 0, BE = 1 }; + +class Endianness { + EndiannessType type_; + + public: + Endianness(EndiannessType type) : type_(type) {} // NOLINT(runtime/explicit) + Endianness(uint32_t type) // NOLINT(runtime/explicit) + : type_(static_cast(type)) { + VIXL_ASSERT(type <= 1); + } + EndiannessType GetType() const { return type_; } + const char* GetName() const; +}; + +inline std::ostream& operator<<(std::ostream& os, Endianness endian_specifier) { + return os << endian_specifier.GetName(); +} + +enum AlignmentType { + k16BitAlign = 0, + k32BitAlign = 1, + k64BitAlign = 2, + k128BitAlign = 3, + k256BitAlign = 4, + kNoAlignment = 5, + 
kBadAlignment = 6 +}; + +class Alignment { + AlignmentType align_; + + public: + Alignment(AlignmentType align) // NOLINT(runtime/explicit) + : align_(align) {} + Alignment(uint32_t align) // NOLINT(runtime/explicit) + : align_(static_cast(align)) { + VIXL_ASSERT(align <= static_cast(k256BitAlign)); + } + AlignmentType GetType() const { return align_; } + bool Is(AlignmentType type) { return align_ == type; } +}; + +inline std::ostream& operator<<(std::ostream& os, Alignment align) { + if (align.GetType() == kBadAlignment) return os << " :??"; + if (align.GetType() == kNoAlignment) return os; + return os << " :" << (0x10 << static_cast(align.GetType())); +} + +// Structure containing information on forward references. +struct ReferenceInfo { + int size; + int min_offset; + int max_offset; + int alignment; // As a power of two. + enum { kAlignPc, kDontAlignPc } pc_needs_aligning; +}; + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_AARCH32_INSTRUCTIONS_AARCH32_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.cc new file mode 100644 index 00000000..d61aafa9 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.cc @@ -0,0 +1,152 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "location-aarch32.h" + +#include "assembler-aarch32.h" +#include "macro-assembler-aarch32.h" + +namespace vixl { + +namespace aarch32 { + +bool Location::Needs16BitPadding(int32_t location) const { + if (!HasForwardReferences()) return false; + const ForwardRef& last_ref = GetLastForwardReference(); + int32_t min_location_last_ref = last_ref.GetMinLocation(); + VIXL_ASSERT(min_location_last_ref - location <= 2); + return (min_location_last_ref > location); +} + +void Location::ResolveReferences(internal::AssemblerBase* assembler) { + // Iterate over references and call EncodeLocationFor on each of them. 
+ for (ForwardRefListIterator it(this); !it.Done(); it.Advance()) { + const ForwardRef& reference = *it.Current(); + VIXL_ASSERT(reference.LocationIsEncodable(location_)); + int32_t from = reference.GetLocation(); + EncodeLocationFor(assembler, from, reference.op()); + } + forward_.clear(); +} + +static bool Is16BitEncoding(uint16_t instr) { + return instr < (kLowestT32_32Opcode >> 16); +} + +void Location::EncodeLocationFor(internal::AssemblerBase* assembler, + int32_t from, + const Location::EmitOperator* encoder) { + if (encoder->IsUsingT32()) { + uint16_t* instr_ptr = + assembler->GetBuffer()->GetOffsetAddress(from); + if (Is16BitEncoding(instr_ptr[0])) { + // The Encode methods always deals with uint32_t types so we need + // to explicitly cast it. + uint32_t instr = static_cast(instr_ptr[0]); + instr = encoder->Encode(instr, from, this); + // The Encode method should not ever set the top 16 bits. + VIXL_ASSERT((instr & ~0xffff) == 0); + instr_ptr[0] = static_cast(instr); + } else { + uint32_t instr = + instr_ptr[1] | (static_cast(instr_ptr[0]) << 16); + instr = encoder->Encode(instr, from, this); + instr_ptr[0] = static_cast(instr >> 16); + instr_ptr[1] = static_cast(instr); + } + } else { + uint32_t* instr_ptr = + assembler->GetBuffer()->GetOffsetAddress(from); + instr_ptr[0] = encoder->Encode(instr_ptr[0], from, this); + } +} + +void Location::AddForwardRef(int32_t instr_location, + const EmitOperator& op, + const ReferenceInfo* info) { + VIXL_ASSERT(referenced_); + int32_t from = instr_location + (op.IsUsingT32() ? 
kT32PcDelta : kA32PcDelta); + if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) + from = AlignDown(from, 4); + int32_t min_object_location = from + info->min_offset; + int32_t max_object_location = from + info->max_offset; + forward_.insert(ForwardRef(&op, + instr_location, + info->size, + min_object_location, + max_object_location, + info->alignment)); +} + +int Location::GetMaxAlignment() const { + int max_alignment = GetPoolObjectAlignment(); + for (ForwardRefListIterator it(const_cast(this)); !it.Done(); + it.Advance()) { + const ForwardRef& reference = *it.Current(); + if (reference.GetAlignment() > max_alignment) + max_alignment = reference.GetAlignment(); + } + return max_alignment; +} + +int Location::GetMinLocation() const { + int32_t min_location = 0; + for (ForwardRefListIterator it(const_cast(this)); !it.Done(); + it.Advance()) { + const ForwardRef& reference = *it.Current(); + if (reference.GetMinLocation() > min_location) + min_location = reference.GetMinLocation(); + } + return min_location; +} + +void Label::UpdatePoolObject(PoolObject* object) { + VIXL_ASSERT(forward_.size() == 1); + const ForwardRef& reference = forward_.Front(); + object->Update(reference.GetMinLocation(), + reference.GetMaxLocation(), + reference.GetAlignment()); +} + +void Label::EmitPoolObject(MacroAssemblerInterface* masm) { + MacroAssembler* macro_assembler = static_cast(masm); + + // Add a new branch to this label. 
+ macro_assembler->GetBuffer()->EnsureSpaceFor(kMaxInstructionSizeInBytes); + ExactAssemblyScopeWithoutPoolsCheck guard(macro_assembler, + kMaxInstructionSizeInBytes, + ExactAssemblyScope::kMaximumSize); + macro_assembler->b(this); +} + +void RawLiteral::EmitPoolObject(MacroAssemblerInterface* masm) { + Assembler* assembler = static_cast(masm->AsAssemblerBase()); + + assembler->GetBuffer()->EnsureSpaceFor(GetSize()); + assembler->GetBuffer()->EmitData(GetDataAddress(), GetSize()); +} +} +} diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.h new file mode 100644 index 00000000..0f29a6c6 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/location-aarch32.h @@ -0,0 +1,411 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH32_LABEL_AARCH32_H_ +#define VIXL_AARCH32_LABEL_AARCH32_H_ + +extern "C" { +#include +} + +#include +#include +#include +#include + +#include "invalset-vixl.h" +#include "pool-manager.h" +#include "utils-vixl.h" + +#include "constants-aarch32.h" +#include "instructions-aarch32.h" + +namespace vixl { + +namespace aarch32 { + +class MacroAssembler; + +class Location : public LocationBase { + friend class Assembler; + friend class MacroAssembler; + + public: + // Unbound location that can be used with the assembler bind() method and + // with the assembler methods for generating instructions, but will never + // be handled by the pool manager. 
+ Location() + : LocationBase(kRawLocation, 1 /* dummy size*/), + referenced_(false) {} + + typedef int32_t Offset; + + ~Location() { +#ifdef VIXL_DEBUG + if (IsReferenced() && !IsBound()) { + VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n"); + } +#endif + } + + bool IsReferenced() const { return referenced_; } + + private: + class EmitOperator { + public: + explicit EmitOperator(InstructionSet isa) : isa_(isa) { +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + USE(isa_); + VIXL_ASSERT(isa == A32); +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + USE(isa_); + VIXL_ASSERT(isa == T32); +#endif + } + virtual ~EmitOperator() {} + virtual uint32_t Encode(uint32_t /*instr*/, + Location::Offset /*pc*/, + const Location* /*label*/) const { + return 0; + } +#if defined(VIXL_INCLUDE_TARGET_A32_ONLY) + bool IsUsingT32() const { return false; } +#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY) + bool IsUsingT32() const { return true; } +#else + bool IsUsingT32() const { return isa_ == T32; } +#endif + + private: + InstructionSet isa_; + }; + + protected: + class ForwardRef : public ForwardReference { + public: + // Default constructor for InvalSet. + ForwardRef() : ForwardReference(0, 0, 0, 0, 1), op_(NULL) {} + + ForwardRef(const Location::EmitOperator* op, + int32_t location, + int size, + int32_t min_object_location, + int32_t max_object_location, + int object_alignment = 1) + : ForwardReference(location, + size, + min_object_location, + max_object_location, + object_alignment), + op_(op) {} + + const Location::EmitOperator* op() const { return op_; } + + // We must provide comparison operators to work with InvalSet. 
+ bool operator==(const ForwardRef& other) const { + return GetLocation() == other.GetLocation(); + } + bool operator<(const ForwardRef& other) const { + return GetLocation() < other.GetLocation(); + } + bool operator<=(const ForwardRef& other) const { + return GetLocation() <= other.GetLocation(); + } + bool operator>(const ForwardRef& other) const { + return GetLocation() > other.GetLocation(); + } + + private: + const Location::EmitOperator* op_; + }; + + static const int kNPreallocatedElements = 4; + // The following parameters will not affect ForwardRefList in practice, as we + // resolve all references at once and clear the list, so we do not need to + // remove individual elements by invalidating them. + static const int32_t kInvalidLinkKey = INT32_MAX; + static const size_t kReclaimFrom = 512; + static const size_t kReclaimFactor = 2; + + typedef InvalSet + ForwardRefListBase; + typedef InvalSetIterator ForwardRefListIteratorBase; + + class ForwardRefList : public ForwardRefListBase { + public: + ForwardRefList() : ForwardRefListBase() {} + + using ForwardRefListBase::Back; + using ForwardRefListBase::Front; + }; + + class ForwardRefListIterator : public ForwardRefListIteratorBase { + public: + explicit ForwardRefListIterator(Location* location) + : ForwardRefListIteratorBase(&location->forward_) {} + + // TODO: Remove these and use the STL-like interface instead. We'll need a + // const_iterator implemented for this. + using ForwardRefListIteratorBase::Advance; + using ForwardRefListIteratorBase::Current; + }; + + // For InvalSet::GetKey() and InvalSet::SetKey(). 
+ friend class InvalSet; + + private: + virtual void ResolveReferences(internal::AssemblerBase* assembler) + VIXL_OVERRIDE; + + void SetReferenced() { referenced_ = true; } + + bool HasForwardReferences() const { return !forward_.empty(); } + + ForwardRef GetLastForwardReference() const { + VIXL_ASSERT(HasForwardReferences()); + return forward_.Back(); + } + + // Add forward reference to this object. Called from the assembler. + void AddForwardRef(int32_t instr_location, + const EmitOperator& op, + const ReferenceInfo* info); + + // Check if we need to add padding when binding this object, in order to + // meet the minimum location requirement. + bool Needs16BitPadding(int location) const; + + void EncodeLocationFor(internal::AssemblerBase* assembler, + int32_t from, + const Location::EmitOperator* encoder); + + // True if the label has been used at least once. + bool referenced_; + + protected: + // Types passed to LocationBase. Must be distinct for unbound Locations (not + // relevant for bound locations, as they don't have a correspoding + // PoolObject). + static const int kRawLocation = 0; // Will not be used by the pool manager. + static const int kVeneerType = 1; + static const int kLiteralType = 2; + + // Contains the references to the unbound label + ForwardRefList forward_; + + // To be used only by derived classes. + Location(uint32_t type, int size, int alignment) + : LocationBase(type, size, alignment), referenced_(false) {} + + // To be used only by derived classes. + explicit Location(Offset location) + : LocationBase(location), referenced_(false) {} + + virtual int GetMaxAlignment() const VIXL_OVERRIDE; + virtual int GetMinLocation() const VIXL_OVERRIDE; + + private: + // Included to make the class concrete, however should never be called. 
+ virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE { + USE(masm); + VIXL_UNREACHABLE(); + } +}; + +class Label : public Location { + static const int kVeneerSize = 4; + // Use an alignment of 1 for all architectures. Even though we can bind an + // unused label, because of the way the MacroAssembler works we can always be + // sure to have the correct buffer alignment for the instruction set we are + // using, so we do not need to enforce additional alignment requirements + // here. + // TODO: Consider modifying the interface of the pool manager to pass an + // optional additional alignment to Bind() in order to handle cases where the + // buffer could be unaligned. + static const int kVeneerAlignment = 1; + + public: + Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {} + explicit Label(Offset location) : Location(location) {} + + private: + virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE { + return false; + } + virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE { + return false; + } + + virtual void UpdatePoolObject(PoolObject* object) VIXL_OVERRIDE; + virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE; + + virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE { + return true; + } + virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE { + VIXL_ASSERT(UsePoolObjectEmissionMargin() == true); + return 1 * KBytes; + } +}; + +class RawLiteral : public Location { + // Some load instructions require alignment to 4 bytes. Since we do + // not know what instructions will reference a literal after we place + // it, we enforce a 4 byte alignment for literals that are 4 bytes or + // larger. 
+ static const int kLiteralAlignment = 4; + + public: + enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced }; + + enum DeletionPolicy { + kDeletedOnPlacementByPool, + kDeletedOnPoolDestruction, + kManuallyDeleted + }; + + RawLiteral(const void* addr, + int size, + PlacementPolicy placement_policy = kPlacedWhenUsed, + DeletionPolicy deletion_policy = kManuallyDeleted) + : Location(kLiteralType, + size, + (size < kLiteralAlignment) ? size : kLiteralAlignment), + addr_(addr), + manually_placed_(placement_policy == kManuallyPlaced), + deletion_policy_(deletion_policy) { + // We can't have manually placed literals that are not manually deleted. + VIXL_ASSERT(!IsManuallyPlaced() || + (GetDeletionPolicy() == kManuallyDeleted)); + } + RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy) + : Location(kLiteralType, + size, + (size < kLiteralAlignment) ? size : kLiteralAlignment), + addr_(addr), + manually_placed_(false), + deletion_policy_(deletion_policy) {} + const void* GetDataAddress() const { return addr_; } + int GetSize() const { return GetPoolObjectSizeInBytes(); } + + bool IsManuallyPlaced() const { return manually_placed_; } + + private: + DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; } + + virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE { + return GetDeletionPolicy() == kDeletedOnPlacementByPool; + } + virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE { + return GetDeletionPolicy() == kDeletedOnPoolDestruction; + } + virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE; + + // Data address before it's moved into the code buffer. + const void* const addr_; + // When this flag is true, the label will be placed manually. 
+ bool manually_placed_; + // When is the literal to be removed from the memory + // Can be delete'd when: + // moved into the code buffer: kDeletedOnPlacementByPool + // the pool is delete'd: kDeletedOnPoolDestruction + // or left to the application: kManuallyDeleted. + DeletionPolicy deletion_policy_; + + friend class MacroAssembler; +}; + +template +class Literal : public RawLiteral { + public: + explicit Literal(const T& value, + PlacementPolicy placement_policy = kPlacedWhenUsed, + DeletionPolicy deletion_policy = kManuallyDeleted) + : RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy), + value_(value) {} + explicit Literal(const T& value, DeletionPolicy deletion_policy) + : RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {} + void UpdateValue(const T& value, CodeBuffer* buffer) { + value_ = value; + if (IsBound()) { + buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize()); + } + } + + private: + T value_; +}; + +class StringLiteral : public RawLiteral { + public: + explicit StringLiteral(const char* str, + PlacementPolicy placement_policy = kPlacedWhenUsed, + DeletionPolicy deletion_policy = kManuallyDeleted) + : RawLiteral(str, + static_cast(strlen(str) + 1), + placement_policy, + deletion_policy) { + VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize); + } + explicit StringLiteral(const char* str, DeletionPolicy deletion_policy) + : RawLiteral(str, static_cast(strlen(str) + 1), deletion_policy) { + VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize); + } +}; + +} // namespace aarch32 + + +// Required InvalSet template specialisations. 
+#define INVAL_SET_TEMPLATE_PARAMETERS \ + aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \ + int32_t, aarch32::Location::kInvalidLinkKey, \ + aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor +template <> +inline int32_t InvalSet::GetKey( + const aarch32::Location::ForwardRef& element) { + return element.GetLocation(); +} +template <> +inline void InvalSet::SetKey( + aarch32::Location::ForwardRef* element, int32_t key) { + element->SetLocationToInvalidateOnly(key); +} +#undef INVAL_SET_TEMPLATE_PARAMETERS + +} // namespace vixl + +#endif // VIXL_AARCH32_LABEL_AARCH32_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.cc new file mode 100644 index 00000000..56c0ffbd --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.cc @@ -0,0 +1,2312 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include "aarch32/macro-assembler-aarch32.h" + +#define STRINGIFY(x) #x +#define TOSTRING(x) STRINGIFY(x) + +#define CONTEXT_SCOPE \ + ContextScope context(this, __FILE__ ":" TOSTRING(__LINE__)) + +namespace vixl { +namespace aarch32 { + +ExactAssemblyScopeWithoutPoolsCheck::ExactAssemblyScopeWithoutPoolsCheck( + MacroAssembler* masm, size_t size, SizePolicy size_policy) + : ExactAssemblyScope(masm, + size, + size_policy, + ExactAssemblyScope::kIgnorePools) {} + +void UseScratchRegisterScope::Open(MacroAssembler* masm) { + VIXL_ASSERT(masm_ == NULL); + VIXL_ASSERT(masm != NULL); + masm_ = masm; + + old_available_ = masm_->GetScratchRegisterList()->GetList(); + old_available_vfp_ = masm_->GetScratchVRegisterList()->GetList(); + + parent_ = masm->GetCurrentScratchRegisterScope(); + masm->SetCurrentScratchRegisterScope(this); +} + + +void UseScratchRegisterScope::Close() { + if (masm_ != NULL) { + // Ensure that scopes nest perfectly, and do not outlive their parents. + // This is a run-time check because the order of destruction of objects in + // the _same_ scope is implementation-defined, and is likely to change in + // optimised builds. 
+ VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == this); + masm_->SetCurrentScratchRegisterScope(parent_); + + masm_->GetScratchRegisterList()->SetList(old_available_); + masm_->GetScratchVRegisterList()->SetList(old_available_vfp_); + + masm_ = NULL; + } +} + + +bool UseScratchRegisterScope::IsAvailable(const Register& reg) const { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + return masm_->GetScratchRegisterList()->Includes(reg); +} + + +bool UseScratchRegisterScope::IsAvailable(const VRegister& reg) const { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + return masm_->GetScratchVRegisterList()->IncludesAllOf(reg); +} + + +Register UseScratchRegisterScope::Acquire() { + VIXL_ASSERT(masm_ != NULL); + Register reg = masm_->GetScratchRegisterList()->GetFirstAvailableRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchRegisterList()->Remove(reg); + return reg; +} + + +VRegister UseScratchRegisterScope::AcquireV(unsigned size_in_bits) { + switch (size_in_bits) { + case kSRegSizeInBits: + return AcquireS(); + case kDRegSizeInBits: + return AcquireD(); + case kQRegSizeInBits: + return AcquireQ(); + default: + VIXL_UNREACHABLE(); + return NoVReg; + } +} + + +QRegister UseScratchRegisterScope::AcquireQ() { + VIXL_ASSERT(masm_ != NULL); + QRegister reg = + masm_->GetScratchVRegisterList()->GetFirstAvailableQRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchVRegisterList()->Remove(reg); + return reg; +} + + +DRegister UseScratchRegisterScope::AcquireD() { + VIXL_ASSERT(masm_ != NULL); + DRegister reg = + masm_->GetScratchVRegisterList()->GetFirstAvailableDRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchVRegisterList()->Remove(reg); + return reg; +} + + +SRegister UseScratchRegisterScope::AcquireS() { + VIXL_ASSERT(masm_ != NULL); + SRegister reg = + masm_->GetScratchVRegisterList()->GetFirstAvailableSRegister(); + VIXL_CHECK(reg.IsValid()); + masm_->GetScratchVRegisterList()->Remove(reg); + return reg; +} + + 
+void UseScratchRegisterScope::Release(const Register& reg) { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + VIXL_ASSERT(!masm_->GetScratchRegisterList()->Includes(reg)); + masm_->GetScratchRegisterList()->Combine(reg); +} + + +void UseScratchRegisterScope::Release(const VRegister& reg) { + VIXL_ASSERT(masm_ != NULL); + VIXL_ASSERT(reg.IsValid()); + VIXL_ASSERT(!masm_->GetScratchVRegisterList()->IncludesAliasOf(reg)); + masm_->GetScratchVRegisterList()->Combine(reg); +} + + +void UseScratchRegisterScope::Include(const RegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + RegisterList excluded_registers(sp, lr, pc); + uint32_t mask = list.GetList() & ~excluded_registers.GetList(); + RegisterList* available = masm_->GetScratchRegisterList(); + available->SetList(available->GetList() | mask); +} + + +void UseScratchRegisterScope::Include(const VRegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + VRegisterList* available = masm_->GetScratchVRegisterList(); + available->SetList(available->GetList() | list.GetList()); +} + + +void UseScratchRegisterScope::Exclude(const RegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + RegisterList* available = masm_->GetScratchRegisterList(); + available->SetList(available->GetList() & ~list.GetList()); +} + + +void UseScratchRegisterScope::Exclude(const VRegisterList& list) { + VIXL_ASSERT(masm_ != NULL); + VRegisterList* available = masm_->GetScratchVRegisterList(); + available->SetList(available->GetList() & ~list.GetList()); +} + + +void UseScratchRegisterScope::Exclude(const Operand& operand) { + if (operand.IsImmediateShiftedRegister()) { + Exclude(operand.GetBaseRegister()); + } else if (operand.IsRegisterShiftedRegister()) { + Exclude(operand.GetBaseRegister(), operand.GetShiftRegister()); + } else { + VIXL_ASSERT(operand.IsImmediate()); + } +} + + +void UseScratchRegisterScope::ExcludeAll() { + VIXL_ASSERT(masm_ != NULL); + masm_->GetScratchRegisterList()->SetList(0); + 
masm_->GetScratchVRegisterList()->SetList(0); +} + + +void MacroAssembler::EnsureEmitPoolsFor(size_t size_arg) { + // We skip the check when the pools are blocked. + if (ArePoolsBlocked()) return; + + VIXL_ASSERT(IsUint32(size_arg)); + uint32_t size = static_cast(size_arg); + + if (pool_manager_.MustEmit(GetCursorOffset(), size)) { + int32_t new_pc = pool_manager_.Emit(this, GetCursorOffset(), size); + VIXL_ASSERT(new_pc == GetCursorOffset()); + USE(new_pc); + } +} + + +void MacroAssembler::HandleOutOfBoundsImmediate(Condition cond, + Register tmp, + uint32_t imm) { + if (IsUintN(16, imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mov(cond, tmp, imm & 0xffff); + return; + } + if (IsUsingT32()) { + if (ImmediateT32::IsImmediateT32(~imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mvn(cond, tmp, ~imm); + return; + } + } else { + if (ImmediateA32::IsImmediateA32(~imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mvn(cond, tmp, ~imm); + return; + } + } + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + mov(cond, tmp, imm & 0xffff); + movt(cond, tmp, imm >> 16); +} + + +MemOperand MacroAssembler::MemOperandComputationHelper( + Condition cond, + Register scratch, + Register base, + uint32_t offset, + uint32_t extra_offset_mask) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(scratch)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(base)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + + // Check for the simple pass-through case. 
+ if ((offset & extra_offset_mask) == offset) return MemOperand(base, offset); + + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + + uint32_t load_store_offset = offset & extra_offset_mask; + uint32_t add_offset = offset & ~extra_offset_mask; + if ((add_offset != 0) && + (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) { + load_store_offset = 0; + add_offset = offset; + } + + if (base.IsPC()) { + // Special handling for PC bases. We must read the PC in the first + // instruction (and only in that instruction), and we must also take care to + // keep the same address calculation as loads and stores. For T32, that + // means using something like ADR, which uses AlignDown(PC, 4). + + // We don't handle positive offsets from PC because the intention is not + // clear; does the user expect the offset from the current + // GetCursorOffset(), or to allow a certain amount of space after the + // instruction? + VIXL_ASSERT((offset & 0x80000000) != 0); + if (IsUsingT32()) { + // T32: make the first instruction "SUB (immediate, from PC)" -- an alias + // of ADR -- to get behaviour like loads and stores. This ADR can handle + // at least as much offset as the load_store_offset so it can replace it. + + uint32_t sub_pc_offset = (-offset) & 0xfff; + load_store_offset = (offset + sub_pc_offset) & extra_offset_mask; + add_offset = (offset + sub_pc_offset) & ~extra_offset_mask; + + ExactAssemblyScope scope(this, k32BitT32InstructionSizeInBytes); + sub(cond, scratch, base, sub_pc_offset); + + if (add_offset == 0) return MemOperand(scratch, load_store_offset); + + // The rest of the offset can be generated in the usual way. + base = scratch; + } + // A32 can use any SUB instruction, so we don't have to do anything special + // here except to ensure that we read the PC first. 
+ } + + add(cond, scratch, base, add_offset); + return MemOperand(scratch, load_store_offset); +} + + +uint32_t MacroAssembler::GetOffsetMask(InstructionType type, + AddrMode addrmode) { + switch (type) { + case kLdr: + case kLdrb: + case kStr: + case kStrb: + if (IsUsingA32() || (addrmode == Offset)) { + return 0xfff; + } else { + return 0xff; + } + case kLdrsb: + case kLdrh: + case kLdrsh: + case kStrh: + if (IsUsingT32() && (addrmode == Offset)) { + return 0xfff; + } else { + return 0xff; + } + case kVldr: + case kVstr: + return 0x3fc; + case kLdrd: + case kStrd: + if (IsUsingA32()) { + return 0xff; + } else { + return 0x3fc; + } + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +HARDFLOAT void PrintfTrampolineRRRR( + const char* format, uint32_t a, uint32_t b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRRRD( + const char* format, uint32_t a, uint32_t b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRRDR( + const char* format, uint32_t a, uint32_t b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRRDD( + const char* format, uint32_t a, uint32_t b, double c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDRR( + const char* format, uint32_t a, double b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDRD( + const char* format, uint32_t a, double b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDDR( + const char* format, uint32_t a, double b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineRDDD( + const char* format, uint32_t a, double b, double c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDRRR( + const char* format, double a, uint32_t b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + 
+HARDFLOAT void PrintfTrampolineDRRD( + const char* format, double a, uint32_t b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDRDR( + const char* format, double a, uint32_t b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDRDD( + const char* format, double a, uint32_t b, double c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDRR( + const char* format, double a, double b, uint32_t c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDRD( + const char* format, double a, double b, uint32_t c, double d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDDR( + const char* format, double a, double b, double c, uint32_t d) { + printf(format, a, b, c, d); +} + + +HARDFLOAT void PrintfTrampolineDDDD( + const char* format, double a, double b, double c, double d) { + printf(format, a, b, c, d); +} + + +void MacroAssembler::Printf(const char* format, + CPURegister reg1, + CPURegister reg2, + CPURegister reg3, + CPURegister reg4) { + // Exclude all registers from the available scratch registers, so + // that we are able to use ip below. + // TODO: Refactor this function to use UseScratchRegisterScope + // for temporary registers below. 
+ UseScratchRegisterScope scratch(this); + scratch.ExcludeAll(); + if (generate_simulator_code_) { + PushRegister(reg4); + PushRegister(reg3); + PushRegister(reg2); + PushRegister(reg1); + Push(RegisterList(r0, r1)); + StringLiteral* format_literal = + new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool); + Adr(r0, format_literal); + uint32_t args = (reg4.GetType() << 12) | (reg3.GetType() << 8) | + (reg2.GetType() << 4) | reg1.GetType(); + Mov(r1, args); + Hvc(kPrintfCode); + Pop(RegisterList(r0, r1)); + int size = reg4.GetRegSizeInBytes() + reg3.GetRegSizeInBytes() + + reg2.GetRegSizeInBytes() + reg1.GetRegSizeInBytes(); + Drop(size); + } else { + // Generate on a native platform => 32 bit environment. + // Preserve core registers r0-r3, r12, r14 + const uint32_t saved_registers_mask = + kCallerSavedRegistersMask | (1 << r5.GetCode()); + Push(RegisterList(saved_registers_mask)); + // Push VFP registers. + Vpush(Untyped64, DRegisterList(d0, 8)); + if (Has32DRegs()) Vpush(Untyped64, DRegisterList(d16, 16)); + // Search one register which has been saved and which doesn't need to be + // printed. + RegisterList available_registers(kCallerSavedRegistersMask); + if (reg1.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg1.GetCode())); + } + if (reg2.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg2.GetCode())); + } + if (reg3.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg3.GetCode())); + } + if (reg4.GetType() == CPURegister::kRRegister) { + available_registers.Remove(Register(reg4.GetCode())); + } + Register tmp = available_registers.GetFirstAvailableRegister(); + VIXL_ASSERT(tmp.GetType() == CPURegister::kRRegister); + // Push the flags. + Mrs(tmp, APSR); + Push(tmp); + Vmrs(RegisterOrAPSR_nzcv(tmp.GetCode()), FPSCR); + Push(tmp); + // Push the registers to print on the stack. 
+ PushRegister(reg4); + PushRegister(reg3); + PushRegister(reg2); + PushRegister(reg1); + int core_count = 1; + int vfp_count = 0; + uint32_t printf_type = 0; + // Pop the registers to print and store them into r1-r3 and/or d0-d3. + // Reg4 may stay into the stack if all the register to print are core + // registers. + PreparePrintfArgument(reg1, &core_count, &vfp_count, &printf_type); + PreparePrintfArgument(reg2, &core_count, &vfp_count, &printf_type); + PreparePrintfArgument(reg3, &core_count, &vfp_count, &printf_type); + PreparePrintfArgument(reg4, &core_count, &vfp_count, &printf_type); + // Ensure that the stack is aligned on 8 bytes. + And(r5, sp, 0x7); + if (core_count == 5) { + // One 32 bit argument (reg4) has been left on the stack => align the + // stack + // before the argument. + Pop(r0); + Sub(sp, sp, r5); + Push(r0); + } else { + Sub(sp, sp, r5); + } + // Select the right trampoline depending on the arguments. + uintptr_t address; + switch (printf_type) { + case 0: + address = reinterpret_cast(PrintfTrampolineRRRR); + break; + case 1: + address = reinterpret_cast(PrintfTrampolineDRRR); + break; + case 2: + address = reinterpret_cast(PrintfTrampolineRDRR); + break; + case 3: + address = reinterpret_cast(PrintfTrampolineDDRR); + break; + case 4: + address = reinterpret_cast(PrintfTrampolineRRDR); + break; + case 5: + address = reinterpret_cast(PrintfTrampolineDRDR); + break; + case 6: + address = reinterpret_cast(PrintfTrampolineRDDR); + break; + case 7: + address = reinterpret_cast(PrintfTrampolineDDDR); + break; + case 8: + address = reinterpret_cast(PrintfTrampolineRRRD); + break; + case 9: + address = reinterpret_cast(PrintfTrampolineDRRD); + break; + case 10: + address = reinterpret_cast(PrintfTrampolineRDRD); + break; + case 11: + address = reinterpret_cast(PrintfTrampolineDDRD); + break; + case 12: + address = reinterpret_cast(PrintfTrampolineRRDD); + break; + case 13: + address = reinterpret_cast(PrintfTrampolineDRDD); + break; + case 14: + 
address = reinterpret_cast(PrintfTrampolineRDDD); + break; + case 15: + address = reinterpret_cast(PrintfTrampolineDDDD); + break; + default: + VIXL_UNREACHABLE(); + address = reinterpret_cast(PrintfTrampolineRRRR); + break; + } + StringLiteral* format_literal = + new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool); + Adr(r0, format_literal); + Mov(ip, Operand::From(address)); + Blx(ip); + // If register reg4 was left on the stack => skip it. + if (core_count == 5) Drop(kRegSizeInBytes); + // Restore the stack as it was before alignment. + Add(sp, sp, r5); + // Restore the flags. + Pop(tmp); + Vmsr(FPSCR, tmp); + Pop(tmp); + Msr(APSR_nzcvqg, tmp); + // Restore the regsisters. + if (Has32DRegs()) Vpop(Untyped64, DRegisterList(d16, 16)); + Vpop(Untyped64, DRegisterList(d0, 8)); + Pop(RegisterList(saved_registers_mask)); + } +} + + +void MacroAssembler::PushRegister(CPURegister reg) { + switch (reg.GetType()) { + case CPURegister::kNoRegister: + break; + case CPURegister::kRRegister: + Push(Register(reg.GetCode())); + break; + case CPURegister::kSRegister: + Vpush(Untyped32, SRegisterList(SRegister(reg.GetCode()))); + break; + case CPURegister::kDRegister: + Vpush(Untyped64, DRegisterList(DRegister(reg.GetCode()))); + break; + case CPURegister::kQRegister: + VIXL_UNIMPLEMENTED(); + break; + } +} + + +void MacroAssembler::PreparePrintfArgument(CPURegister reg, + int* core_count, + int* vfp_count, + uint32_t* printf_type) { + switch (reg.GetType()) { + case CPURegister::kNoRegister: + break; + case CPURegister::kRRegister: + VIXL_ASSERT(*core_count <= 4); + if (*core_count < 4) Pop(Register(*core_count)); + *core_count += 1; + break; + case CPURegister::kSRegister: + VIXL_ASSERT(*vfp_count < 4); + *printf_type |= 1 << (*core_count + *vfp_count - 1); + Vpop(Untyped32, SRegisterList(SRegister(*vfp_count * 2))); + Vcvt(F64, F32, DRegister(*vfp_count), SRegister(*vfp_count * 2)); + *vfp_count += 1; + break; + case CPURegister::kDRegister: + 
VIXL_ASSERT(*vfp_count < 4); + *printf_type |= 1 << (*core_count + *vfp_count - 1); + Vpop(Untyped64, DRegisterList(DRegister(*vfp_count))); + *vfp_count += 1; + break; + case CPURegister::kQRegister: + VIXL_UNIMPLEMENTED(); + break; + } +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondROp instruction, + Condition cond, + Register rn, + const Operand& operand) { + VIXL_ASSERT((type == kMovt) || (type == kSxtb16) || (type == kTeq) || + (type == kUxtb16)); + + if (type == kMovt) { + VIXL_ABORT_WITH_MSG("`Movt` expects a 16-bit immediate.\n"); + } + + // This delegate only supports teq with immediates. + CONTEXT_SCOPE; + if ((type == kTeq) && operand.IsImmediate()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(cond, scratch, operand.GetImmediate()); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + teq(cond, rn, scratch); + return; + } + Assembler::Delegate(type, instruction, cond, rn, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondSizeROp instruction, + Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) { + CONTEXT_SCOPE; + VIXL_ASSERT(size.IsBest()); + VIXL_ASSERT((type == kCmn) || (type == kCmp) || (type == kMov) || + (type == kMovs) || (type == kMvn) || (type == kMvns) || + (type == kSxtb) || (type == kSxth) || (type == kTst) || + (type == kUxtb) || (type == kUxth)); + if (IsUsingT32() && operand.IsRegisterShiftedRegister()) { + VIXL_ASSERT((type != kMov) || (type != kMovs)); + InstructionCondRROp shiftop = NULL; + switch (operand.GetShift().GetType()) { + case LSL: + shiftop = &Assembler::lsl; + break; + case LSR: + shiftop = &Assembler::lsr; + break; + case ASR: + shiftop = &Assembler::asr; + break; + case RRX: + // A RegisterShiftedRegister operand cannot have a shift of type RRX. 
+ VIXL_UNREACHABLE(); + break; + case ROR: + shiftop = &Assembler::ror; + break; + default: + VIXL_UNREACHABLE(); + } + if (shiftop != NULL) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + (this->*shiftop)(cond, + scratch, + operand.GetBaseRegister(), + operand.GetShiftRegister()); + (this->*instruction)(cond, size, rn, scratch); + return; + } + } + if (operand.IsImmediate()) { + uint32_t imm = operand.GetImmediate(); + switch (type) { + case kMov: + case kMovs: + if (!rn.IsPC()) { + // Immediate is too large, but not using PC, so handle with mov{t}. + HandleOutOfBoundsImmediate(cond, rn, imm); + if (type == kMovs) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + tst(cond, rn, rn); + } + return; + } else if (type == kMov) { + VIXL_ASSERT(IsUsingA32() || cond.Is(al)); + // Immediate is too large and using PC, so handle using a temporary + // register. + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(al, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + bx(cond, scratch); + return; + } + break; + case kCmn: + case kCmp: + if (IsUsingA32() || !rn.IsPC()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rn, scratch); + return; + } + break; + case kMvn: + case kMvns: + if (!rn.IsPC()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rn, scratch); + return; + } + break; + case kTst: + if (IsUsingA32() || !rn.IsPC()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + 
HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rn, scratch); + return; + } + break; + default: // kSxtb, Sxth, Uxtb, Uxth + break; + } + } + Assembler::Delegate(type, instruction, cond, size, rn, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondRROp instruction, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + if ((type == kSxtab) || (type == kSxtab16) || (type == kSxtah) || + (type == kUxtab) || (type == kUxtab16) || (type == kUxtah) || + (type == kPkhbt) || (type == kPkhtb)) { + UnimplementedDelegate(type); + return; + } + + // This delegate only handles the following instructions. + VIXL_ASSERT((type == kOrn) || (type == kOrns) || (type == kRsc) || + (type == kRscs)); + CONTEXT_SCOPE; + + // T32 does not support register shifted register operands, emulate it. + if (IsUsingT32() && operand.IsRegisterShiftedRegister()) { + InstructionCondRROp shiftop = NULL; + switch (operand.GetShift().GetType()) { + case LSL: + shiftop = &Assembler::lsl; + break; + case LSR: + shiftop = &Assembler::lsr; + break; + case ASR: + shiftop = &Assembler::asr; + break; + case RRX: + // A RegisterShiftedRegister operand cannot have a shift of type RRX. + VIXL_UNREACHABLE(); + break; + case ROR: + shiftop = &Assembler::ror; + break; + default: + VIXL_UNREACHABLE(); + } + if (shiftop != NULL) { + UseScratchRegisterScope temps(this); + Register rm = operand.GetBaseRegister(); + Register rs = operand.GetShiftRegister(); + // Try to use rd as a scratch register. We can do this if it aliases rs or + // rm (because we read them in the first instruction), but not rn. + if (!rd.Is(rn)) temps.Include(rd); + Register scratch = temps.Acquire(); + // TODO: The scope length was measured empirically. We should analyse the + // worst-case size and add targetted tests. 
+ CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + (this->*shiftop)(cond, scratch, rm, rs); + (this->*instruction)(cond, rd, rn, scratch); + return; + } + } + + // T32 does not have a Rsc instruction, negate the lhs input and turn it into + // an Adc. Adc and Rsc are equivalent using a bitwise NOT: + // adc rd, rn, operand <-> rsc rd, NOT(rn), operand + if (IsUsingT32() && ((type == kRsc) || (type == kRscs))) { + // The RegisterShiftRegister case should have been handled above. + VIXL_ASSERT(!operand.IsRegisterShiftedRegister()); + UseScratchRegisterScope temps(this); + // Try to use rd as a scratch register. We can do this if it aliases rn + // (because we read it in the first instruction), but not rm. + temps.Include(rd); + temps.Exclude(operand); + Register negated_rn = temps.Acquire(); + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mvn(cond, negated_rn, rn); + } + if (type == kRsc) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + adc(cond, rd, negated_rn, operand); + return; + } + // TODO: We shouldn't have to specify how much space the next instruction + // needs. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + adcs(cond, rd, negated_rn, operand); + return; + } + + if (operand.IsImmediate()) { + // If the immediate can be encoded when inverted, turn Orn into Orr. + // Otherwise rely on HandleOutOfBoundsImmediate to generate a series of + // mov. + int32_t imm = operand.GetSignedImmediate(); + if (((type == kOrn) || (type == kOrns)) && IsModifiedImmediate(~imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + switch (type) { + case kOrn: + orr(cond, rd, rn, ~imm); + return; + case kOrns: + orrs(cond, rd, rn, ~imm); + return; + default: + VIXL_UNREACHABLE(); + break; + } + } + } + + // A32 does not have a Orn instruction, negate the rhs input and turn it into + // a Orr. 
+ if (IsUsingA32() && ((type == kOrn) || (type == kOrns))) { + // TODO: orn r0, r1, imm -> orr r0, r1, neg(imm) if doable + // mvn r0, r2 + // orr r0, r1, r0 + Register scratch; + UseScratchRegisterScope temps(this); + // Try to use rd as a scratch register. We can do this if it aliases rs or + // rm (because we read them in the first instruction), but not rn. + if (!rd.Is(rn)) temps.Include(rd); + scratch = temps.Acquire(); + { + // TODO: We shouldn't have to specify how much space the next instruction + // needs. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + mvn(cond, scratch, operand); + } + if (type == kOrns) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + orrs(cond, rd, rn, scratch); + return; + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + orr(cond, rd, rn, scratch); + return; + } + + if (operand.IsImmediate()) { + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if possible. 
+ if (!rd.Is(rn)) temps.Include(rd); + Register scratch = temps.Acquire(); + int32_t imm = operand.GetSignedImmediate(); + HandleOutOfBoundsImmediate(cond, scratch, imm); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rd, rn, scratch); + return; + } + Assembler::Delegate(type, instruction, cond, rd, rn, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondSizeRL instruction, + Condition cond, + EncodingSize size, + Register rd, + Location* location) { + VIXL_ASSERT((type == kLdr) || (type == kAdr)); + + CONTEXT_SCOPE; + VIXL_ASSERT(size.IsBest()); + + if ((type == kLdr) && location->IsBound()) { + CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + temps.Include(rd); + uint32_t mask = GetOffsetMask(type, Offset); + ldr(rd, MemOperandComputationHelper(cond, temps.Acquire(), location, mask)); + return; + } + + Assembler::Delegate(type, instruction, cond, size, rd, location); +} + + +bool MacroAssembler::GenerateSplitInstruction( + InstructionCondSizeRROp instruction, + Condition cond, + Register rd, + Register rn, + uint32_t imm, + uint32_t mask) { + uint32_t high = imm & ~mask; + if (!IsModifiedImmediate(high) && !rn.IsPC()) return false; + // If high is a modified immediate, we can perform the operation with + // only 2 instructions. + // Else, if rn is PC, we want to avoid moving PC into a temporary. + // Therefore, we also use the pattern even if the second call may + // generate 3 instructions. + uint32_t low = imm & mask; + CodeBufferCheckScope scope(this, + (rn.IsPC() ? 
4 : 2) * kMaxInstructionSizeInBytes); + (this->*instruction)(cond, Best, rd, rn, low); + (this->*instruction)(cond, Best, rd, rd, high); + return true; +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondSizeRROp instruction, + Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) { + VIXL_ASSERT( + (type == kAdc) || (type == kAdcs) || (type == kAdd) || (type == kAdds) || + (type == kAnd) || (type == kAnds) || (type == kAsr) || (type == kAsrs) || + (type == kBic) || (type == kBics) || (type == kEor) || (type == kEors) || + (type == kLsl) || (type == kLsls) || (type == kLsr) || (type == kLsrs) || + (type == kOrr) || (type == kOrrs) || (type == kRor) || (type == kRors) || + (type == kRsb) || (type == kRsbs) || (type == kSbc) || (type == kSbcs) || + (type == kSub) || (type == kSubs)); + + CONTEXT_SCOPE; + VIXL_ASSERT(size.IsBest()); + if (IsUsingT32() && operand.IsRegisterShiftedRegister()) { + InstructionCondRROp shiftop = NULL; + switch (operand.GetShift().GetType()) { + case LSL: + shiftop = &Assembler::lsl; + break; + case LSR: + shiftop = &Assembler::lsr; + break; + case ASR: + shiftop = &Assembler::asr; + break; + case RRX: + // A RegisterShiftedRegister operand cannot have a shift of type RRX. + VIXL_UNREACHABLE(); + break; + case ROR: + shiftop = &Assembler::ror; + break; + default: + VIXL_UNREACHABLE(); + } + if (shiftop != NULL) { + UseScratchRegisterScope temps(this); + Register rm = operand.GetBaseRegister(); + Register rs = operand.GetShiftRegister(); + // Try to use rd as a scratch register. We can do this if it aliases rs or + // rm (because we read them in the first instruction), but not rn. 
+ if (!rd.Is(rn)) temps.Include(rd); + Register scratch = temps.Acquire(); + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + (this->*shiftop)(cond, scratch, rm, rs); + (this->*instruction)(cond, size, rd, rn, scratch); + return; + } + } + if (operand.IsImmediate()) { + int32_t imm = operand.GetSignedImmediate(); + if (ImmediateT32::IsImmediateT32(~imm)) { + if (IsUsingT32()) { + switch (type) { + case kOrr: + orn(cond, rd, rn, ~imm); + return; + case kOrrs: + orns(cond, rd, rn, ~imm); + return; + default: + break; + } + } + } + if (imm < 0) { + InstructionCondSizeRROp asmcb = NULL; + // Add and sub are equivalent using an arithmetic negation: + // add rd, rn, #imm <-> sub rd, rn, - #imm + // Add and sub with carry are equivalent using a bitwise NOT: + // adc rd, rn, #imm <-> sbc rd, rn, NOT #imm + switch (type) { + case kAdd: + asmcb = &Assembler::sub; + imm = -imm; + break; + case kAdds: + asmcb = &Assembler::subs; + imm = -imm; + break; + case kSub: + asmcb = &Assembler::add; + imm = -imm; + break; + case kSubs: + asmcb = &Assembler::adds; + imm = -imm; + break; + case kAdc: + asmcb = &Assembler::sbc; + imm = ~imm; + break; + case kAdcs: + asmcb = &Assembler::sbcs; + imm = ~imm; + break; + case kSbc: + asmcb = &Assembler::adc; + imm = ~imm; + break; + case kSbcs: + asmcb = &Assembler::adcs; + imm = ~imm; + break; + default: + break; + } + if (asmcb != NULL) { + CodeBufferCheckScope scope(this, 4 * kMaxInstructionSizeInBytes); + (this->*asmcb)(cond, size, rd, rn, Operand(imm)); + return; + } + } + + // When rn is PC, only handle negative offsets. The correct way to handle + // positive offsets isn't clear; does the user want the offset from the + // start of the macro, or from the end (to allow a certain amount of space)? + // When type is Add or Sub, imm is always positive (imm < 0 has just been + // handled and imm == 0 would have been generated without the need of a + // delegate). Therefore, only add to PC is forbidden here. 
+ if ((((type == kAdd) && !rn.IsPC()) || (type == kSub)) && + (IsUsingA32() || (!rd.IsPC() && !rn.IsPC()))) { + VIXL_ASSERT(imm > 0); + // Try to break the constant into two modified immediates. + // For T32 also try to break the constant into one imm12 and one modified + // immediate. Count the trailing zeroes and get the biggest even value. + int trailing_zeroes = CountTrailingZeros(imm) & ~1u; + uint32_t mask = ((trailing_zeroes < 4) && IsUsingT32()) + ? 0xfff + : (0xff << trailing_zeroes); + if (GenerateSplitInstruction(instruction, cond, rd, rn, imm, mask)) { + return; + } + InstructionCondSizeRROp asmcb = NULL; + switch (type) { + case kAdd: + asmcb = &Assembler::sub; + break; + case kSub: + asmcb = &Assembler::add; + break; + default: + VIXL_UNREACHABLE(); + } + if (GenerateSplitInstruction(asmcb, cond, rd, rn, -imm, mask)) { + return; + } + } + + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if possible. + if (!rd.Is(rn)) temps.Include(rd); + if (rn.IsPC()) { + // If we're reading the PC, we need to do it in the first instruction, + // otherwise we'll read the wrong value. We rely on this to handle the + // long-range PC-relative MemOperands which can result from user-managed + // literals. + + // Only handle negative offsets. The correct way to handle positive + // offsets isn't clear; does the user want the offset from the start of + // the macro, or from the end (to allow a certain amount of space)? 
+ bool offset_is_negative_or_zero = (imm <= 0); + switch (type) { + case kAdd: + case kAdds: + offset_is_negative_or_zero = (imm <= 0); + break; + case kSub: + case kSubs: + offset_is_negative_or_zero = (imm >= 0); + break; + case kAdc: + case kAdcs: + offset_is_negative_or_zero = (imm < 0); + break; + case kSbc: + case kSbcs: + offset_is_negative_or_zero = (imm > 0); + break; + default: + break; + } + if (offset_is_negative_or_zero) { + { + rn = temps.Acquire(); + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + mov(cond, rn, pc); + } + // Recurse rather than falling through, to try to get the immediate into + // a single instruction. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rd, rn, operand); + return; + } + } else { + Register scratch = temps.Acquire(); + // TODO: The scope length was measured empirically. We should analyse the + // worst-case size and add targetted tests. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + mov(cond, scratch, operand.GetImmediate()); + (this->*instruction)(cond, size, rd, rn, scratch); + return; + } + } + Assembler::Delegate(type, instruction, cond, size, rd, rn, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionRL instruction, + Register rn, + Location* location) { + VIXL_ASSERT((type == kCbz) || (type == kCbnz)); + + CONTEXT_SCOPE; + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + if (IsUsingA32()) { + if (type == kCbz) { + VIXL_ABORT_WITH_MSG("Cbz is only available for T32.\n"); + } else { + VIXL_ABORT_WITH_MSG("Cbnz is only available for T32.\n"); + } + } else if (rn.IsLow()) { + switch (type) { + case kCbnz: { + Label done; + cbz(rn, &done); + b(location); + Bind(&done); + return; + } + case kCbz: { + Label done; + cbnz(rn, &done); + b(location); + Bind(&done); + return; + } + default: + break; + } + } + Assembler::Delegate(type, instruction, rn, location); +} + + +template 
+static inline bool IsI64BitPattern(T imm) { + for (T mask = 0xff << ((sizeof(T) - 1) * 8); mask != 0; mask >>= 8) { + if (((imm & mask) != mask) && ((imm & mask) != 0)) return false; + } + return true; +} + + +template +static inline bool IsI8BitPattern(T imm) { + uint8_t imm8 = imm & 0xff; + for (unsigned rep = sizeof(T) - 1; rep > 0; rep--) { + imm >>= 8; + if ((imm & 0xff) != imm8) return false; + } + return true; +} + + +static inline bool CanBeInverted(uint32_t imm32) { + uint32_t fill8 = 0; + + if ((imm32 & 0xffffff00) == 0xffffff00) { + // 11111111 11111111 11111111 abcdefgh + return true; + } + if (((imm32 & 0xff) == 0) || ((imm32 & 0xff) == 0xff)) { + fill8 = imm32 & 0xff; + imm32 >>= 8; + if ((imm32 >> 8) == 0xffff) { + // 11111111 11111111 abcdefgh 00000000 + // or 11111111 11111111 abcdefgh 11111111 + return true; + } + if ((imm32 & 0xff) == fill8) { + imm32 >>= 8; + if ((imm32 >> 8) == 0xff) { + // 11111111 abcdefgh 00000000 00000000 + // or 11111111 abcdefgh 11111111 11111111 + return true; + } + if ((fill8 == 0xff) && ((imm32 & 0xff) == 0xff)) { + // abcdefgh 11111111 11111111 11111111 + return true; + } + } + } + return false; +} + + +template +static inline RES replicate(T imm) { + VIXL_ASSERT((sizeof(RES) > sizeof(T)) && + (((sizeof(RES) / sizeof(T)) * sizeof(T)) == sizeof(RES))); + RES res = imm; + for (unsigned i = sizeof(RES) / sizeof(T) - 1; i > 0; i--) { + res = (res << (sizeof(T) * 8)) | imm; + } + return res; +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtSSop instruction, + Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + CONTEXT_SCOPE; + if (type == kVmov) { + if (operand.IsImmediate() && dt.Is(F32)) { + const NeonImmediate& neon_imm = operand.GetNeonImmediate(); + if (neon_imm.CanConvert()) { + // movw ip, imm16 + // movk ip, imm16 + // vmov s0, ip + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + float f = neon_imm.GetImmediate(); + // TODO: The 
scope length was measured empirically. We should analyse + // the + // worst-case size and add targetted tests. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + mov(cond, scratch, FloatToRawbits(f)); + vmov(cond, rd, scratch); + return; + } + } + } + Assembler::Delegate(type, instruction, cond, dt, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtDDop instruction, + Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + CONTEXT_SCOPE; + if (type == kVmov) { + if (operand.IsImmediate()) { + const NeonImmediate& neon_imm = operand.GetNeonImmediate(); + switch (dt.GetValue()) { + case I32: + if (neon_imm.CanConvert()) { + uint32_t imm = neon_imm.GetImmediate(); + // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab + if (IsI8BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I8, rd, imm & 0xff); + return; + } + // vmov.i32 d0, 0xff0000ff will translate into + // vmov.i64 d0, 0xff0000ffff0000ff + if (IsI64BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I64, rd, replicate(imm)); + return; + } + // vmov.i32 d0, 0xffab0000 will translate into + // vmvn.i32 d0, 0x0054ffff + if (cond.Is(al) && CanBeInverted(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmvn(I32, rd, ~imm); + return; + } + } + break; + case I16: + if (neon_imm.CanConvert()) { + uint16_t imm = neon_imm.GetImmediate(); + // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab + if (IsI8BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I8, rd, imm & 0xff); + return; + } + } + break; + case I64: + if (neon_imm.CanConvert()) { + uint64_t imm = neon_imm.GetImmediate(); + // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff + if (IsI8BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I8, rd, imm & 
0xff); + return; + } + // mov ip, lo(imm64) + // vdup d0, ip + // vdup is prefered to 'vmov d0[0]' as d0[1] does not need to be + // preserved + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + // TODO: The scope length was measured empirically. We should + // analyse the + // worst-case size and add targetted tests. + CodeBufferCheckScope scope(this, + 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, static_cast(imm & 0xffffffff)); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vdup(cond, Untyped32, rd, scratch); + } + // mov ip, hi(imm64) + // vmov d0[1], ip + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + // TODO: The scope length was measured empirically. We should + // analyse the + // worst-case size and add targetted tests. + CodeBufferCheckScope scope(this, + 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, static_cast(imm >> 32)); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, Untyped32, DRegisterLane(rd, 1), scratch); + } + return; + } + break; + default: + break; + } + VIXL_ASSERT(!dt.Is(I8)); // I8 cases should have been handled already. + if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert()) { + // mov ip, imm32 + // vdup.16 d0, ip + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, neon_imm.GetImmediate()); + } + DataTypeValue vdup_dt = Untyped32; + switch (dt.GetValue()) { + case I16: + vdup_dt = Untyped16; + break; + case I32: + vdup_dt = Untyped32; + break; + default: + VIXL_UNREACHABLE(); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vdup(cond, vdup_dt, rd, scratch); + return; + } + if (dt.Is(F32) && neon_imm.CanConvert()) { + float f = neon_imm.GetImmediate(); + // Punt to vmov.i32 + // TODO: The scope length was guessed based on the double case below. 
We + // should analyse the worst-case size and add targetted tests. + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + vmov(cond, I32, rd, FloatToRawbits(f)); + return; + } + if (dt.Is(F64) && neon_imm.CanConvert()) { + // Punt to vmov.i64 + double d = neon_imm.GetImmediate(); + // TODO: The scope length was measured empirically. We should analyse + // the + // worst-case size and add targetted tests. + CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes); + vmov(cond, I64, rd, DoubleToRawbits(d)); + return; + } + } + } + Assembler::Delegate(type, instruction, cond, dt, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtQQop instruction, + Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + CONTEXT_SCOPE; + if (type == kVmov) { + if (operand.IsImmediate()) { + const NeonImmediate& neon_imm = operand.GetNeonImmediate(); + switch (dt.GetValue()) { + case I32: + if (neon_imm.CanConvert()) { + uint32_t imm = neon_imm.GetImmediate(); + // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab + if (IsI8BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I8, rd, imm & 0xff); + return; + } + // vmov.i32 d0, 0xff0000ff will translate into + // vmov.i64 d0, 0xff0000ffff0000ff + if (IsI64BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I64, rd, replicate(imm)); + return; + } + // vmov.i32 d0, 0xffab0000 will translate into + // vmvn.i32 d0, 0x0054ffff + if (CanBeInverted(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmvn(cond, I32, rd, ~imm); + return; + } + } + break; + case I16: + if (neon_imm.CanConvert()) { + uint16_t imm = neon_imm.GetImmediate(); + // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab + if (IsI8BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I8, rd, imm & 0xff); + return; + } + } 
+ break; + case I64: + if (neon_imm.CanConvert()) { + uint64_t imm = neon_imm.GetImmediate(); + // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff + if (IsI8BitPattern(imm)) { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, I8, rd, imm & 0xff); + return; + } + // mov ip, lo(imm64) + // vdup q0, ip + // vdup is prefered to 'vmov d0[0]' as d0[1-3] don't need to be + // preserved + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + CodeBufferCheckScope scope(this, + 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, static_cast(imm & 0xffffffff)); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vdup(cond, Untyped32, rd, scratch); + } + // mov ip, hi(imm64) + // vmov.i32 d0[1], ip + // vmov d1, d0 + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + CodeBufferCheckScope scope(this, + 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, static_cast(imm >> 32)); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, + Untyped32, + DRegisterLane(rd.GetLowDRegister(), 1), + scratch); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister()); + } + return; + } + break; + default: + break; + } + VIXL_ASSERT(!dt.Is(I8)); // I8 cases should have been handled already. 
+ if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert()) { + // mov ip, imm32 + // vdup.16 d0, ip + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, neon_imm.GetImmediate()); + } + DataTypeValue vdup_dt = Untyped32; + switch (dt.GetValue()) { + case I16: + vdup_dt = Untyped16; + break; + case I32: + vdup_dt = Untyped32; + break; + default: + VIXL_UNREACHABLE(); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + vdup(cond, vdup_dt, rd, scratch); + return; + } + if (dt.Is(F32) && neon_imm.CanConvert()) { + // Punt to vmov.i64 + float f = neon_imm.GetImmediate(); + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + vmov(cond, I32, rd, FloatToRawbits(f)); + return; + } + if (dt.Is(F64) && neon_imm.CanConvert()) { + // Use vmov to create the double in the low D register, then duplicate + // it into the high D register. + double d = neon_imm.GetImmediate(); + CodeBufferCheckScope scope(this, 7 * kMaxInstructionSizeInBytes); + vmov(cond, F64, rd.GetLowDRegister(), d); + vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister()); + return; + } + } + } + Assembler::Delegate(type, instruction, cond, dt, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondRL instruction, + Condition cond, + Register rt, + Location* location) { + VIXL_ASSERT((type == kLdrb) || (type == kLdrh) || (type == kLdrsb) || + (type == kLdrsh)); + + CONTEXT_SCOPE; + + if (location->IsBound()) { + CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + temps.Include(rt); + Register scratch = temps.Acquire(); + uint32_t mask = GetOffsetMask(type, Offset); + switch (type) { + case kLdrb: + ldrb(rt, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + case kLdrh: + ldrh(rt, MemOperandComputationHelper(cond, scratch, location, mask)); + return; 
+ case kLdrsb: + ldrsb(rt, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + case kLdrsh: + ldrsh(rt, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + default: + VIXL_UNREACHABLE(); + } + return; + } + + Assembler::Delegate(type, instruction, cond, rt, location); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondRRL instruction, + Condition cond, + Register rt, + Register rt2, + Location* location) { + VIXL_ASSERT(type == kLdrd); + + CONTEXT_SCOPE; + + if (location->IsBound()) { + CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + temps.Include(rt, rt2); + Register scratch = temps.Acquire(); + uint32_t mask = GetOffsetMask(type, Offset); + ldrd(rt, rt2, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + } + + Assembler::Delegate(type, instruction, cond, rt, rt2, location); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondSizeRMop instruction, + Condition cond, + EncodingSize size, + Register rd, + const MemOperand& operand) { + CONTEXT_SCOPE; + VIXL_ASSERT(size.IsBest()); + VIXL_ASSERT((type == kLdr) || (type == kLdrb) || (type == kLdrh) || + (type == kLdrsb) || (type == kLdrsh) || (type == kStr) || + (type == kStrb) || (type == kStrh)); + if (operand.IsImmediate()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + int32_t offset = operand.GetOffsetImmediate(); + uint32_t extra_offset_mask = GetOffsetMask(type, addrmode); + // Try to maximize the offset used by the MemOperand (load_store_offset). + // Add the part which can't be used by the MemOperand (add_offset). 
+ uint32_t load_store_offset = offset & extra_offset_mask; + uint32_t add_offset = offset & ~extra_offset_mask; + if ((add_offset != 0) && + (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) { + load_store_offset = 0; + add_offset = offset; + } + switch (addrmode) { + case PreIndex: + // Avoid the unpredictable case 'str r0, [r0, imm]!' + if (!rn.Is(rd)) { + // Pre-Indexed case: + // ldr r0, [r1, 12345]! will translate into + // add r1, r1, 12345 + // ldr r0, [r1] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + size, + rd, + MemOperand(rn, load_store_offset, PreIndex)); + } + return; + } + break; + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if possible. + if ((type != kStr) && (type != kStrb) && (type != kStrh) && + !rd.Is(rn)) { + temps.Include(rd); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldr r0, [r1, 12345] will translate into + // add r0, r1, 12345 + // ldr r0, [r0] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + size, + rd, + MemOperand(scratch, load_store_offset)); + } + return; + } + case PostIndex: + // Avoid the unpredictable case 'ldr r0, [r0], imm' + if (!rn.Is(rd)) { + // Post-indexed case: + // ldr r0. [r1], imm32 will translate into + // ldr r0, [r1] + // movw ip. 
imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r1, r1, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + size, + rd, + MemOperand(rn, load_store_offset, PostIndex)); + } + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + return; + } + break; + } + } else if (operand.IsPlainRegister()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + const Register& rm = operand.GetOffsetRegister(); + if (rm.IsPC()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert loads and stores with a PC " + "offset register.\n"); + } + if (rn.IsPC()) { + if (addrmode == Offset) { + if (IsUsingT32()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert loads and stores with a PC " + "base register for T32.\n"); + } + } else { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert loads and stores with a PC " + "base register in pre-index or post-index mode.\n"); + } + } + switch (addrmode) { + case PreIndex: + // Avoid the unpredictable case 'str r0, [r0, imm]!' + if (!rn.Is(rd)) { + // Pre-Indexed case: + // ldr r0, [r1, r2]! will translate into + // add r1, r1, r2 + // ldr r0, [r1] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rd, MemOperand(rn, Offset)); + } + return; + } + break; + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destination as a scratch register if this is not a + // store. + // Avoid using PC as a temporary as this has side-effects. 
+ if ((type != kStr) && (type != kStrb) && (type != kStrh) && + !rd.IsPC()) { + temps.Include(rd); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldr r0, [r1, r2] will translate into + // add r0, r1, r2 + // ldr r0, [r0] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, scratch, rn, rm); + } else { + sub(cond, scratch, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rd, MemOperand(scratch, Offset)); + } + return; + } + case PostIndex: + // Avoid the unpredictable case 'ldr r0, [r0], imm' + if (!rn.Is(rd)) { + // Post-indexed case: + // ldr r0. [r1], r2 will translate into + // ldr r0, [r1] + // add r1, r1, r2 + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, size, rd, MemOperand(rn, Offset)); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + return; + } + break; + } + } + Assembler::Delegate(type, instruction, cond, size, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondRRMop instruction, + Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + if ((type == kLdaexd) || (type == kLdrexd) || (type == kStlex) || + (type == kStlexb) || (type == kStlexh) || (type == kStrex) || + (type == kStrexb) || (type == kStrexh)) { + UnimplementedDelegate(type); + return; + } + + VIXL_ASSERT((type == kLdrd) || (type == kStrd)); + + CONTEXT_SCOPE; + + // TODO: Should we allow these cases? + if (IsUsingA32()) { + // The first register needs to be even. + if ((rt.GetCode() & 1) != 0) { + UnimplementedDelegate(type); + return; + } + // Registers need to be adjacent. 
+ if (((rt.GetCode() + 1) % kNumberOfRegisters) != rt2.GetCode()) { + UnimplementedDelegate(type); + return; + } + // LDRD lr, pc [...] is not allowed. + if (rt.Is(lr)) { + UnimplementedDelegate(type); + return; + } + } + + if (operand.IsImmediate()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + int32_t offset = operand.GetOffsetImmediate(); + uint32_t extra_offset_mask = GetOffsetMask(type, addrmode); + // Try to maximize the offset used by the MemOperand (load_store_offset). + // Add the part which can't be used by the MemOperand (add_offset). + uint32_t load_store_offset = offset & extra_offset_mask; + uint32_t add_offset = offset & ~extra_offset_mask; + if ((add_offset != 0) && + (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) { + load_store_offset = 0; + add_offset = offset; + } + switch (addrmode) { + case PreIndex: { + // Allow using the destinations as a scratch registers if possible. + UseScratchRegisterScope temps(this); + if (type == kLdrd) { + if (!rt.Is(rn)) temps.Include(rt); + if (!rt2.Is(rn)) temps.Include(rt2); + } + + // Pre-Indexed case: + // ldrd r0, r1, [r2, 12345]! will translate into + // add r2, 12345 + // ldrd r0, r1, [r2] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + rt, + rt2, + MemOperand(rn, load_store_offset, PreIndex)); + } + return; + } + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destinations as a scratch registers if possible. 
+ if (type == kLdrd) { + if (!rt.Is(rn)) temps.Include(rt); + if (!rt2.Is(rn)) temps.Include(rt2); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldrd r0, r1, [r2, 12345] will translate into + // add r0, r2, 12345 + // ldrd r0, r1, [r0] + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, add_offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + rt, + rt2, + MemOperand(scratch, load_store_offset)); + } + return; + } + case PostIndex: + // Avoid the unpredictable case 'ldrd r0, r1, [r0], imm' + if (!rn.Is(rt) && !rn.Is(rt2)) { + // Post-indexed case: + // ldrd r0, r1, [r2], imm32 will translate into + // ldrd r0, r1, [r2] + // movw ip, imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r2, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, + rt, + rt2, + MemOperand(rn, load_store_offset, PostIndex)); + } + { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, add_offset); + } + return; + } + break; + } + } + if (operand.IsPlainRegister()) { + const Register& rn = operand.GetBaseRegister(); + const Register& rm = operand.GetOffsetRegister(); + AddrMode addrmode = operand.GetAddrMode(); + switch (addrmode) { + case PreIndex: + // ldrd r0, r1, [r2, r3]!
will translate into + // add r2, r3 + // ldrd r0, r1, [r2] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset)); + } + return; + case PostIndex: + // ldrd r0, r1, [r2], r3 will translate into + // ldrd r0, r1, [r2] + // add r2, r3 + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset)); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, rn, rn, rm); + } else { + sub(cond, rn, rn, rm); + } + } + return; + case Offset: { + UseScratchRegisterScope temps(this); + // Allow using the destinations as a scratch registers if possible. + if (type == kLdrd) { + if (!rt.Is(rn)) temps.Include(rt); + if (!rt2.Is(rn)) temps.Include(rt2); + } + Register scratch = temps.Acquire(); + // Offset case: + // ldrd r0, r1, [r2, r3] will translate into + // add r0, r2, r3 + // ldrd r0, r1, [r0] + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + if (operand.GetSign().IsPlus()) { + add(cond, scratch, rn, rm); + } else { + sub(cond, scratch, rn, rm); + } + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, rt, rt2, MemOperand(scratch, Offset)); + } + return; + } + } + } + Assembler::Delegate(type, instruction, cond, rt, rt2, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtSMop instruction, + Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + CONTEXT_SCOPE; + if (operand.IsImmediate()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + int32_t offset = operand.GetOffsetImmediate(); + VIXL_ASSERT(((offset > 0) && 
operand.GetSign().IsPlus()) || + ((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0)); + if (rn.IsPC()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert vldr or vstr with a PC base " + "register.\n"); + } + switch (addrmode) { + case PreIndex: + // Pre-Indexed case: + // vldr.32 s0, [r1, 12345]! will translate into + // add r1, 12345 + // vldr.32 s0, [r1] + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + return; + case Offset: { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // Offset case: + // vldr.32 s0, [r1, 12345] will translate into + // add ip, r1, 12345 + // vldr.32 s0, [ip] + { + VIXL_ASSERT(offset != 0); + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset)); + } + return; + } + case PostIndex: + // Post-indexed case: + // vldr.32 s0, [r1], imm32 will translate into + // vldr.32 s0, [r1] + // movw ip,
imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r1, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + return; + } + } + Assembler::Delegate(type, instruction, cond, dt, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtDMop instruction, + Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + CONTEXT_SCOPE; + if (operand.IsImmediate()) { + const Register& rn = operand.GetBaseRegister(); + AddrMode addrmode = operand.GetAddrMode(); + int32_t offset = operand.GetOffsetImmediate(); + VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) || + ((offset < 0) && operand.GetSign().IsMinus()) || (offset == 0)); + if (rn.IsPC()) { + VIXL_ABORT_WITH_MSG( + "The MacroAssembler does not convert vldr or vstr with a PC base " + "register.\n"); + } + switch (addrmode) { + case PreIndex: + // Pre-Indexed case: + // vldr.64 d0, [r1, 12345]! 
will translate into + // add r1, 12345 + // vldr.64 d0, [r1] + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + return; + case Offset: { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + // Offset case: + // vldr.64 d0, [r1, 12345] will translate into + // add ip, r1, 12345 + // vldr.64 d0, [ip] + { + VIXL_ASSERT(offset != 0); + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, scratch, rn, offset); + } + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset)); + } + return; + } + case PostIndex: + // Post-indexed case: + // vldr.64 d0, [r1], imm32 will translate into + // vldr.64 d0, [r1] + // movw ip, imm32 & 0xffffffff + // movt ip, imm32 >> 16 + // add r1, ip + { + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset)); + } + if (offset != 0) { + CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes); + add(cond, rn, rn, offset); + } + return; + } + } + Assembler::Delegate(type, instruction, cond, dt, rd, operand); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondMsrOp instruction, + Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) { + USE(type); + VIXL_ASSERT(type == kMsr); + if (operand.IsImmediate()) { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + { + CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes); + mov(cond, scratch, operand); + } + CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes); + msr(cond, spec_reg, scratch); + return; + } + Assembler::Delegate(type, instruction, cond, spec_reg, operand); +} + + +void
MacroAssembler::Delegate(InstructionType type, + InstructionCondDtDL instruction, + Condition cond, + DataType dt, + DRegister rd, + Location* location) { + VIXL_ASSERT(type == kVldr); + + CONTEXT_SCOPE; + + if (location->IsBound()) { + CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + uint32_t mask = GetOffsetMask(type, Offset); + vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + } + + Assembler::Delegate(type, instruction, cond, dt, rd, location); +} + + +void MacroAssembler::Delegate(InstructionType type, + InstructionCondDtSL instruction, + Condition cond, + DataType dt, + SRegister rd, + Location* location) { + VIXL_ASSERT(type == kVldr); + + CONTEXT_SCOPE; + + if (location->IsBound()) { + CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes); + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + uint32_t mask = GetOffsetMask(type, Offset); + vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask)); + return; + } + + Assembler::Delegate(type, instruction, cond, dt, rd, location); +} + + +#undef CONTEXT_SCOPE +#undef TOSTRING +#undef STRINGIFY + +// Start of generated code. +// End of generated code. +} // namespace aarch32 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.h new file mode 100644 index 00000000..115d4d84 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/macro-assembler-aarch32.h @@ -0,0 +1,11185 @@ +// Copyright 2017, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE.
+ +#ifndef VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_ +#define VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_ + +#include "code-generation-scopes-vixl.h" +#include "macro-assembler-interface.h" +#include "pool-manager-impl.h" +#include "pool-manager.h" +#include "utils-vixl.h" + +#include "aarch32/assembler-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/operands-aarch32.h" + +namespace vixl { + +namespace aarch32 { + +class UseScratchRegisterScope; + +enum FlagsUpdate { LeaveFlags = 0, SetFlags = 1, DontCare = 2 }; + +// We use a subclass to access the protected `ExactAssemblyScope` constructor +// giving us control over the pools, and make the constructor private to limit +// usage to code paths emitting pools. +class ExactAssemblyScopeWithoutPoolsCheck : public ExactAssemblyScope { + private: + ExactAssemblyScopeWithoutPoolsCheck(MacroAssembler* masm, + size_t size, + SizePolicy size_policy = kExactSize); + + friend class MacroAssembler; + friend class Label; +}; +// Macro assembler for aarch32 instruction set. +class MacroAssembler : public Assembler, public MacroAssemblerInterface { + public: + enum FinalizeOption { + kFallThrough, // There may be more code to execute after calling Finalize. + kUnreachable // Anything generated after calling Finalize is unreachable. + }; + + virtual internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE { + return this; + } + + virtual bool ArePoolsBlocked() const VIXL_OVERRIDE { + return pool_manager_.IsBlocked(); + } + + virtual void EmitPoolHeader() VIXL_OVERRIDE { + // Check that we have the correct alignment. 
+ if (IsUsingT32()) { + VIXL_ASSERT(GetBuffer()->Is16bitAligned()); + } else { + VIXL_ASSERT(GetBuffer()->Is32bitAligned()); + } + VIXL_ASSERT(pool_end_ == NULL); + pool_end_ = new Label(); + ExactAssemblyScopeWithoutPoolsCheck guard(this, + kMaxInstructionSizeInBytes, + ExactAssemblyScope::kMaximumSize); + b(pool_end_); + } + virtual void EmitPoolFooter() VIXL_OVERRIDE { + // Align buffer to 4 bytes. + GetBuffer()->Align(); + if (pool_end_ != NULL) { + Bind(pool_end_); + delete pool_end_; + pool_end_ = NULL; + } + } + virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { + GetBuffer()->EmitZeroedBytes(n); + } + virtual void EmitNopBytes(int n) VIXL_OVERRIDE { + int nops = 0; + int nop_size = IsUsingT32() ? k16BitT32InstructionSizeInBytes + : kA32InstructionSizeInBytes; + VIXL_ASSERT(n % nop_size == 0); + nops = n / nop_size; + ExactAssemblyScopeWithoutPoolsCheck guard(this, + n, + ExactAssemblyScope::kExactSize); + for (int i = 0; i < nops; ++i) { + nop(); + } + } + + + private: + class MacroEmissionCheckScope : public EmissionCheckScope { + public: + explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm, + PoolPolicy pool_policy = kBlockPools) + : EmissionCheckScope(masm, + kTypicalMacroInstructionMaxSize, + kMaximumSize, + pool_policy) {} + + private: + static const size_t kTypicalMacroInstructionMaxSize = + 8 * kMaxInstructionSizeInBytes; + }; + + class MacroAssemblerContext { + public: + MacroAssemblerContext() : count_(0) {} + ~MacroAssemblerContext() {} + unsigned GetRecursiveCount() const { return count_; } + void Up(const char* loc) { + location_stack_[count_] = loc; + count_++; + if (count_ >= kMaxRecursion) { + printf( + "Recursion limit reached; unable to resolve macro assembler " + "call.\n"); + printf("Macro assembler context stack:\n"); + for (unsigned i = 0; i < kMaxRecursion; i++) { + printf("%10s %s\n", (i == 0) ? 
"oldest -> " : "", location_stack_[i]); + } + VIXL_ABORT(); + } + } + void Down() { + VIXL_ASSERT((count_ > 0) && (count_ < kMaxRecursion)); + count_--; + } + + private: + unsigned count_; + static const uint32_t kMaxRecursion = 6; + const char* location_stack_[kMaxRecursion]; + }; + + // This scope is used at each Delegate entry to avoid infinite recursion of + // Delegate calls. The limit is defined by + // MacroAssemblerContext::kMaxRecursion. + class ContextScope { + public: + explicit ContextScope(MacroAssembler* const masm, const char* loc) + : masm_(masm) { + VIXL_ASSERT(masm_->AllowMacroInstructions()); + masm_->GetContext()->Up(loc); + } + ~ContextScope() { masm_->GetContext()->Down(); } + + private: + MacroAssembler* const masm_; + }; + + MacroAssemblerContext* GetContext() { return &context_; } + + class ITScope { + public: + ITScope(MacroAssembler* masm, + Condition* cond, + const MacroEmissionCheckScope& scope, + bool can_use_it = false) + : masm_(masm), cond_(*cond), can_use_it_(can_use_it) { + // The 'scope' argument is used to remind us to only use this scope inside + // a MacroEmissionCheckScope. This way, we do not need to check whether + // we need to emit the pools or grow the code buffer when emitting the + // IT or B instructions. + USE(scope); + if (!cond_.Is(al) && masm->IsUsingT32()) { + if (can_use_it_) { + // IT is not deprecated (that implies a 16 bit T32 instruction). + // We generate an IT instruction and a conditional instruction. + masm->it(cond_); + } else { + // The usage of IT is deprecated for the instruction. + // We generate a conditional branch and an unconditional instruction. + // Generate the branch. + masm_->b(cond_.Negate(), Narrow, &label_); + // Tell the macro-assembler to generate unconditional instructions. 
+ *cond = al; + } + } +#ifdef VIXL_DEBUG + initial_cursor_offset_ = masm->GetCursorOffset(); +#else + USE(initial_cursor_offset_); +#endif + } + ~ITScope() { + if (label_.IsReferenced()) { + // We only use the label for conditional T32 instructions for which we + // cannot use IT. + VIXL_ASSERT(!cond_.Is(al)); + VIXL_ASSERT(masm_->IsUsingT32()); + VIXL_ASSERT(!can_use_it_); + VIXL_ASSERT(masm_->GetCursorOffset() - initial_cursor_offset_ <= + kMaxT32MacroInstructionSizeInBytes); + masm_->BindHelper(&label_); + } else if (masm_->IsUsingT32() && !cond_.Is(al)) { + // If we've generated a conditional T32 instruction but haven't used the + // label, we must have used IT. Check that we did not generate a + // deprecated sequence. + VIXL_ASSERT(can_use_it_); + VIXL_ASSERT(masm_->GetCursorOffset() - initial_cursor_offset_ <= + k16BitT32InstructionSizeInBytes); + } + } + + private: + MacroAssembler* masm_; + Condition cond_; + Label label_; + bool can_use_it_; + uint32_t initial_cursor_offset_; + }; + + protected: + virtual void BlockPools() VIXL_OVERRIDE { pool_manager_.Block(); } + virtual void ReleasePools() VIXL_OVERRIDE { + pool_manager_.Release(GetCursorOffset()); + } + virtual void EnsureEmitPoolsFor(size_t size) VIXL_OVERRIDE; + + // Tell whether any of the macro instruction can be used. When false the + // MacroAssembler will assert if a method which can emit a variable number + // of instructions is called. + virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE { + allow_macro_instructions_ = value; + } + + void HandleOutOfBoundsImmediate(Condition cond, Register tmp, uint32_t imm); + + public: + // TODO: If we change the MacroAssembler to disallow setting a different ISA, + // we can change the alignment of the pool in the pool manager constructor to + // be 2 bytes for T32. 
+ explicit MacroAssembler(InstructionSet isa = kDefaultISA) + : Assembler(isa), + available_(r12), + current_scratch_scope_(NULL), + pool_manager_(4 /*header_size*/, + 4 /*alignment*/, + 4 /*buffer_alignment*/), + generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE), + pool_end_(NULL) { +#ifdef VIXL_DEBUG + SetAllowMacroInstructions(true); +#else + USE(allow_macro_instructions_); +#endif + } + explicit MacroAssembler(size_t size, InstructionSet isa = kDefaultISA) + : Assembler(size, isa), + available_(r12), + current_scratch_scope_(NULL), + pool_manager_(4 /*header_size*/, + 4 /*alignment*/, + 4 /*buffer_alignment*/), + generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE), + pool_end_(NULL) { +#ifdef VIXL_DEBUG + SetAllowMacroInstructions(true); +#endif + } + MacroAssembler(byte* buffer, size_t size, InstructionSet isa = kDefaultISA) + : Assembler(buffer, size, isa), + available_(r12), + current_scratch_scope_(NULL), + pool_manager_(4 /*header_size*/, + 4 /*alignment*/, + 4 /*buffer_alignment*/), + generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE), + pool_end_(NULL) { +#ifdef VIXL_DEBUG + SetAllowMacroInstructions(true); +#endif + } + + bool GenerateSimulatorCode() const { return generate_simulator_code_; } + + virtual bool AllowMacroInstructions() const VIXL_OVERRIDE { + return allow_macro_instructions_; + } + + void FinalizeCode(FinalizeOption option = kUnreachable) { + EmitLiteralPool(option == kUnreachable + ? PoolManager::kNoBranchRequired + : PoolManager::kBranchRequired); + Assembler::FinalizeCode(); + } + + RegisterList* GetScratchRegisterList() { return &available_; } + VRegisterList* GetScratchVRegisterList() { return &available_vfp_; } + + // Get or set the current (most-deeply-nested) UseScratchRegisterScope. 
+ void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) { + current_scratch_scope_ = scope; + } + UseScratchRegisterScope* GetCurrentScratchRegisterScope() { + return current_scratch_scope_; + } + + // Given an address calculation (Register + immediate), generate code to + // partially compute the address. The returned MemOperand will perform any + // remaining computation in a subsequent load or store instruction. + // + // The offset provided should be the offset that would be used in a load or + // store instruction (if it had sufficient range). This only matters where + // base.Is(pc), since load and store instructions align the pc before + // dereferencing it. + // + // TODO: Improve the handling of negative offsets. They are not implemented + // precisely for now because they only have a marginal benefit for the + // existing uses (in delegates). + MemOperand MemOperandComputationHelper(Condition cond, + Register scratch, + Register base, + uint32_t offset, + uint32_t extra_offset_mask = 0); + + MemOperand MemOperandComputationHelper(Register scratch, + Register base, + uint32_t offset, + uint32_t extra_offset_mask = 0) { + return MemOperandComputationHelper(al, + scratch, + base, + offset, + extra_offset_mask); + } + MemOperand MemOperandComputationHelper(Condition cond, + Register scratch, + Location* location, + uint32_t extra_offset_mask = 0) { + // Check for buffer space _before_ calculating the offset, in case we + // generate a pool that affects the offset calculation. 
+ CodeBufferCheckScope scope(this, 4 * kMaxInstructionSizeInBytes); + Label::Offset offset = + location->GetLocation() - + AlignDown(GetCursorOffset() + GetArchitectureStatePCOffset(), 4); + return MemOperandComputationHelper(cond, + scratch, + pc, + offset, + extra_offset_mask); + } + MemOperand MemOperandComputationHelper(Register scratch, + Location* location, + uint32_t extra_offset_mask = 0) { + return MemOperandComputationHelper(al, + scratch, + location, + extra_offset_mask); + } + + // Determine the appropriate mask to pass into MemOperandComputationHelper. + uint32_t GetOffsetMask(InstructionType type, AddrMode addrmode); + + // State and type helpers. + bool IsModifiedImmediate(uint32_t imm) { + return IsUsingT32() ? ImmediateT32::IsImmediateT32(imm) + : ImmediateA32::IsImmediateA32(imm); + } + + void Bind(Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + BindHelper(label); + } + + virtual void BindHelper(Label* label) VIXL_OVERRIDE { + // Assert that we have the correct buffer alignment. + if (IsUsingT32()) { + VIXL_ASSERT(GetBuffer()->Is16bitAligned()); + } else { + VIXL_ASSERT(GetBuffer()->Is32bitAligned()); + } + // If we need to add padding, check if we have to emit the pool. 
+ const int32_t pc = GetCursorOffset(); + if (label->Needs16BitPadding(pc)) { + const int kPaddingBytes = 2; + if (pool_manager_.MustEmit(pc, kPaddingBytes)) { + int32_t new_pc = pool_manager_.Emit(this, pc, kPaddingBytes); + USE(new_pc); + VIXL_ASSERT(new_pc == GetCursorOffset()); + } + } + pool_manager_.Bind(this, label, GetCursorOffset()); + } + + void RegisterLiteralReference(RawLiteral* literal) { + if (literal->IsManuallyPlaced()) return; + RegisterForwardReference(literal); + } + + void RegisterForwardReference(Location* location) { + if (location->IsBound()) return; + VIXL_ASSERT(location->HasForwardReferences()); + const Location::ForwardRef& reference = location->GetLastForwardReference(); + pool_manager_.AddObjectReference(&reference, location); + } + + void CheckEmitPoolForInstruction(const ReferenceInfo* info, + Location* location, + Condition* cond = NULL) { + int size = info->size; + int32_t pc = GetCursorOffset(); + // If we need to emit a branch over the instruction, take this into account. + if ((cond != NULL) && NeedBranch(cond)) { + size += kBranchSize; + pc += kBranchSize; + } + int32_t from = pc; + from += IsUsingT32() ? kT32PcDelta : kA32PcDelta; + if (info->pc_needs_aligning) from = AlignDown(from, 4); + int32_t min = from + info->min_offset; + int32_t max = from + info->max_offset; + ForwardReference temp_ref(pc, + info->size, + min, + max, + info->alignment); + if (pool_manager_.MustEmit(GetCursorOffset(), size, &temp_ref, location)) { + int32_t new_pc = pool_manager_.Emit(this, + GetCursorOffset(), + info->size, + &temp_ref, + location); + USE(new_pc); + VIXL_ASSERT(new_pc == GetCursorOffset()); + } + } + + void Place(RawLiteral* literal) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(literal->IsManuallyPlaced()); + // Check if we need to emit the pools. Take the alignment of the literal + // into account, as well as potential 16-bit padding needed to reach the + // minimum accessible location. 
+ int alignment = literal->GetMaxAlignment(); + int32_t pc = GetCursorOffset(); + int total_size = AlignUp(pc, alignment) - pc + literal->GetSize(); + if (literal->Needs16BitPadding(pc)) total_size += 2; + if (pool_manager_.MustEmit(pc, total_size)) { + int32_t new_pc = pool_manager_.Emit(this, pc, total_size); + USE(new_pc); + VIXL_ASSERT(new_pc == GetCursorOffset()); + } + pool_manager_.Bind(this, literal, GetCursorOffset()); + literal->EmitPoolObject(this); + // Align the buffer, to be ready to generate instructions right after + // this. + GetBuffer()->Align(); + } + + void EmitLiteralPool(PoolManager::EmitOption option = + PoolManager::kBranchRequired) { + VIXL_ASSERT(!ArePoolsBlocked()); + int32_t new_pc = + pool_manager_.Emit(this, GetCursorOffset(), 0, NULL, NULL, option); + VIXL_ASSERT(new_pc == GetCursorOffset()); + USE(new_pc); + } + + void EnsureEmitFor(uint32_t size) { + EnsureEmitPoolsFor(size); + VIXL_ASSERT(GetBuffer()->HasSpaceFor(size) || GetBuffer()->IsManaged()); + GetBuffer()->EnsureSpaceFor(size); + } + + bool AliasesAvailableScratchRegister(Register reg) { + return GetScratchRegisterList()->Includes(reg); + } + + bool AliasesAvailableScratchRegister(RegisterOrAPSR_nzcv reg) { + if (reg.IsAPSR_nzcv()) return false; + return GetScratchRegisterList()->Includes(reg.AsRegister()); + } + + bool AliasesAvailableScratchRegister(VRegister reg) { + return GetScratchVRegisterList()->IncludesAliasOf(reg); + } + + bool AliasesAvailableScratchRegister(const Operand& operand) { + if (operand.IsImmediate()) return false; + return AliasesAvailableScratchRegister(operand.GetBaseRegister()) || + (operand.IsRegisterShiftedRegister() && + AliasesAvailableScratchRegister(operand.GetShiftRegister())); + } + + bool AliasesAvailableScratchRegister(const NeonOperand& operand) { + if (operand.IsImmediate()) return false; + return AliasesAvailableScratchRegister(operand.GetRegister()); + } + + bool AliasesAvailableScratchRegister(SRegisterList list) { + for (int n = 0; 
n < list.GetLength(); n++) { + if (AliasesAvailableScratchRegister(list.GetSRegister(n))) return true; + } + return false; + } + + bool AliasesAvailableScratchRegister(DRegisterList list) { + for (int n = 0; n < list.GetLength(); n++) { + if (AliasesAvailableScratchRegister(list.GetDRegister(n))) return true; + } + return false; + } + + bool AliasesAvailableScratchRegister(NeonRegisterList list) { + for (int n = 0; n < list.GetLength(); n++) { + if (AliasesAvailableScratchRegister(list.GetDRegister(n))) return true; + } + return false; + } + + bool AliasesAvailableScratchRegister(RegisterList list) { + return GetScratchRegisterList()->Overlaps(list); + } + + bool AliasesAvailableScratchRegister(const MemOperand& operand) { + return AliasesAvailableScratchRegister(operand.GetBaseRegister()) || + (operand.IsShiftedRegister() && + AliasesAvailableScratchRegister(operand.GetOffsetRegister())); + } + + // Adr with a literal already constructed. Add the literal to the pool if it + // is not already done. + void Adr(Condition cond, Register rd, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = adr_info(cond, Best, rd, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + adr(cond, Best, rd, literal); + RegisterLiteralReference(literal); + } + void Adr(Register rd, RawLiteral* literal) { Adr(al, rd, literal); } + + // Loads with literals already constructed. Add the literal to the pool + // if it is not already done. 
+ void Ldr(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldr_info(cond, Best, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldr(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldr(Register rt, RawLiteral* literal) { Ldr(al, rt, literal); } + + void Ldrb(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrb_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. 
+ pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrb(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrb(Register rt, RawLiteral* literal) { Ldrb(al, rt, literal); } + + void Ldrd(Condition cond, Register rt, Register rt2, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrd_info(cond, rt, rt2, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrd(cond, rt, rt2, literal); + RegisterLiteralReference(literal); + } + void Ldrd(Register rt, Register rt2, RawLiteral* literal) { + Ldrd(al, rt, rt2, literal); + } + + void Ldrh(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrh_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. 
+ pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrh(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrh(Register rt, RawLiteral* literal) { Ldrh(al, rt, literal); } + + void Ldrsb(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrsb_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrsb(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrsb(Register rt, RawLiteral* literal) { Ldrsb(al, rt, literal); } + + void Ldrsh(Condition cond, Register rt, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = ldrsh_info(cond, rt, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. 
+ pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + ldrsh(cond, rt, literal); + RegisterLiteralReference(literal); + } + void Ldrsh(Register rt, RawLiteral* literal) { Ldrsh(al, rt, literal); } + + void Vldr(Condition cond, DataType dt, DRegister rd, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = vldr_info(cond, dt, rd, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + vldr(cond, dt, rd, literal); + RegisterLiteralReference(literal); + } + void Vldr(DataType dt, DRegister rd, RawLiteral* literal) { + Vldr(al, dt, rd, literal); + } + void Vldr(Condition cond, DRegister rd, RawLiteral* literal) { + Vldr(cond, Untyped64, rd, literal); + } + void Vldr(DRegister rd, RawLiteral* literal) { + Vldr(al, Untyped64, rd, literal); + } + + void Vldr(Condition cond, DataType dt, SRegister rd, RawLiteral* literal) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!literal->IsBound()) { + const ReferenceInfo* info; + bool can_encode = vldr_info(cond, dt, rd, literal, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, literal, &cond); + // We have already checked for pool emission. 
+ pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + vldr(cond, dt, rd, literal); + RegisterLiteralReference(literal); + } + void Vldr(DataType dt, SRegister rd, RawLiteral* literal) { + Vldr(al, dt, rd, literal); + } + void Vldr(Condition cond, SRegister rd, RawLiteral* literal) { + Vldr(cond, Untyped32, rd, literal); + } + void Vldr(SRegister rd, RawLiteral* literal) { + Vldr(al, Untyped32, rd, literal); + } + + // Generic Ldr(register, data) + void Ldr(Condition cond, Register rt, uint32_t v) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + RawLiteral* literal = + new Literal(v, RawLiteral::kDeletedOnPlacementByPool); + Ldr(cond, rt, literal); + } + template + void Ldr(Register rt, T v) { + Ldr(al, rt, v); + } + + // Generic Ldrd(rt, rt2, data) + void Ldrd(Condition cond, Register rt, Register rt2, uint64_t v) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + RawLiteral* literal = + new Literal(v, RawLiteral::kDeletedOnPlacementByPool); + Ldrd(cond, rt, rt2, literal); + } + template + void Ldrd(Register rt, Register rt2, T v) { + Ldrd(al, rt, rt2, v); + } + + void Vldr(Condition cond, SRegister rd, float v) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + RawLiteral* literal = + new Literal(v, RawLiteral::kDeletedOnPlacementByPool); + Vldr(cond, rd, literal); + } + void Vldr(SRegister rd, float v) { Vldr(al, rd, v); } + + void Vldr(Condition cond, DRegister rd, double v) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + RawLiteral* literal = + new Literal(v, 
RawLiteral::kDeletedOnPlacementByPool); + Vldr(cond, rd, literal); + } + void Vldr(DRegister rd, double v) { Vldr(al, rd, v); } + + void Vmov(Condition cond, DRegister rt, double v) { Vmov(cond, F64, rt, v); } + void Vmov(DRegister rt, double v) { Vmov(al, F64, rt, v); } + void Vmov(Condition cond, SRegister rt, float v) { Vmov(cond, F32, rt, v); } + void Vmov(SRegister rt, float v) { Vmov(al, F32, rt, v); } + + // Claim memory on the stack. + // Note that the Claim, Drop, and Peek helpers below ensure that offsets used + // are multiples of 32 bits to help maintain 32-bit SP alignment. + // We could `Align{Up,Down}(size, 4)`, but that's potentially problematic: + // Claim(3) + // Claim(1) + // Drop(4) + // would seem correct, when in fact: + // Claim(3) -> sp = sp - 4 + // Claim(1) -> sp = sp - 4 + // Drop(4) -> sp = sp + 4 + // + void Claim(int32_t size) { + if (size == 0) return; + // The stack must be kept 32bit aligned. + VIXL_ASSERT((size > 0) && ((size % 4) == 0)); + Sub(sp, sp, size); + } + // Release memory on the stack + void Drop(int32_t size) { + if (size == 0) return; + // The stack must be kept 32bit aligned. + VIXL_ASSERT((size > 0) && ((size % 4) == 0)); + Add(sp, sp, size); + } + void Peek(Register dst, int32_t offset) { + VIXL_ASSERT((offset >= 0) && ((offset % 4) == 0)); + Ldr(dst, MemOperand(sp, offset)); + } + void Poke(Register src, int32_t offset) { + VIXL_ASSERT((offset >= 0) && ((offset % 4) == 0)); + Str(src, MemOperand(sp, offset)); + } + void Printf(const char* format, + CPURegister reg1 = NoReg, + CPURegister reg2 = NoReg, + CPURegister reg3 = NoReg, + CPURegister reg4 = NoReg); + // Functions used by Printf for generation. + void PushRegister(CPURegister reg); + void PreparePrintfArgument(CPURegister reg, + int* core_count, + int* vfp_count, + uint32_t* printf_type); + // Handlers for cases not handled by the assembler. 
+ // ADD, MOVT, MOVW, SUB, SXTB16, TEQ, UXTB16 + virtual void Delegate(InstructionType type, + InstructionCondROp instruction, + Condition cond, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // CMN, CMP, MOV, MOVS, MVN, MVNS, SXTB, SXTH, TST, UXTB, UXTH + virtual void Delegate(InstructionType type, + InstructionCondSizeROp instruction, + Condition cond, + EncodingSize size, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // ADDW, ORN, ORNS, PKHBT, PKHTB, RSC, RSCS, SUBW, SXTAB, SXTAB16, SXTAH, + // UXTAB, UXTAB16, UXTAH + virtual void Delegate(InstructionType type, + InstructionCondRROp instruction, + Condition cond, + Register rd, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // ADC, ADCS, ADD, ADDS, AND, ANDS, ASR, ASRS, BIC, BICS, EOR, EORS, LSL, + // LSLS, LSR, LSRS, ORR, ORRS, ROR, RORS, RSB, RSBS, SBC, SBCS, SUB, SUBS + virtual void Delegate(InstructionType type, + InstructionCondSizeRL instruction, + Condition cond, + EncodingSize size, + Register rd, + Location* location) VIXL_OVERRIDE; + bool GenerateSplitInstruction(InstructionCondSizeRROp instruction, + Condition cond, + Register rd, + Register rn, + uint32_t imm, + uint32_t mask); + virtual void Delegate(InstructionType type, + InstructionCondSizeRROp instruction, + Condition cond, + EncodingSize size, + Register rd, + Register rn, + const Operand& operand) VIXL_OVERRIDE; + // CBNZ, CBZ + virtual void Delegate(InstructionType type, + InstructionRL instruction, + Register rn, + Location* location) VIXL_OVERRIDE; + // VMOV + virtual void Delegate(InstructionType type, + InstructionCondDtSSop instruction, + Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) VIXL_OVERRIDE; + // VMOV, VMVN + virtual void Delegate(InstructionType type, + InstructionCondDtDDop instruction, + Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) VIXL_OVERRIDE; + // VMOV, VMVN + virtual void Delegate(InstructionType type, + InstructionCondDtQQop 
instruction, + Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) VIXL_OVERRIDE; + // LDR, LDRB, LDRH, LDRSB, LDRSH, STR, STRB, STRH + virtual void Delegate(InstructionType type, + InstructionCondSizeRMop instruction, + Condition cond, + EncodingSize size, + Register rd, + const MemOperand& operand) VIXL_OVERRIDE; + // LDAEXD, LDRD, LDREXD, STLEX, STLEXB, STLEXH, STRD, STREX, STREXB, STREXH + virtual void Delegate(InstructionType type, + InstructionCondRL instruction, + Condition cond, + Register rt, + Location* location) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondRRL instruction, + Condition cond, + Register rt, + Register rt2, + Location* location) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondRRMop instruction, + Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) VIXL_OVERRIDE; + // VLDR, VSTR + virtual void Delegate(InstructionType type, + InstructionCondDtSMop instruction, + Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) VIXL_OVERRIDE; + // VLDR, VSTR + virtual void Delegate(InstructionType type, + InstructionCondDtDMop instruction, + Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) VIXL_OVERRIDE; + // MSR + virtual void Delegate(InstructionType type, + InstructionCondMsrOp instruction, + Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondDtDL instruction, + Condition cond, + DataType dt, + DRegister rd, + Location* location) VIXL_OVERRIDE; + virtual void Delegate(InstructionType type, + InstructionCondDtSL instruction, + Condition cond, + DataType dt, + SRegister rd, + Location* location) VIXL_OVERRIDE; + + // Start of generated code. 
+ + void Adc(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // ADC{} {,} , ; T1 + operand.IsPlainRegister() && rn.IsLow() && rd.Is(rn) && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + adc(cond, rd, rn, operand); + } + void Adc(Register rd, Register rn, const Operand& operand) { + Adc(al, rd, rn, operand); + } + void Adc(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Adc(cond, rd, rn, operand); + break; + case SetFlags: + Adcs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Adcs(cond, rd, rn, operand); + } else { + Adc(cond, rd, rn, operand); + } + break; + } + } + void Adc(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Adc(flags, al, rd, rn, operand); + } + + void Adcs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + adcs(cond, rd, rn, operand); + } + void Adcs(Register rd, Register rn, const Operand& operand) { + Adcs(al, rd, rn, operand); + } + + void Add(Condition cond, Register rd, Register rn, const Operand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && rd.Is(rn) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + return; + } + } + bool can_use_it = + // ADD{} , , # ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 7) && rn.IsLow() && + rd.IsLow()) || + // ADD{} {,} , # ; T2 + (operand.IsImmediate() && (operand.GetImmediate() <= 255) && + rd.IsLow() && rn.Is(rd)) || + // ADD{}{} , SP, # ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 1020) && + ((operand.GetImmediate() & 0x3) == 0) && rd.IsLow() && rn.IsSP()) || + // ADD{} , , + (operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + operand.GetBaseRegister().IsLow()) || + // ADD{} , ; T2 + (operand.IsPlainRegister() && !rd.IsPC() && rn.Is(rd) && + !operand.GetBaseRegister().IsSP() && + !operand.GetBaseRegister().IsPC()) || + // ADD{}{} {,} SP, ; T1 + (operand.IsPlainRegister() && !rd.IsPC() && rn.IsSP() && + operand.GetBaseRegister().Is(rd)); + ITScope it_scope(this, &cond, guard, can_use_it); + add(cond, rd, rn, operand); + } + void Add(Register rd, Register rn, const Operand& operand) { + Add(al, rd, rn, operand); + } + void Add(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Add(cond, rd, rn, operand); + break; + case SetFlags: + Adds(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && + ((operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + !rd.Is(rn) && operand.GetBaseRegister().IsLow()) || + (operand.IsImmediate() && + ((rd.IsLow() && rn.IsLow() && (operand.GetImmediate() < 8)) || + (rd.IsLow() && rn.Is(rd) && (operand.GetImmediate() < 
256))))); + if (setflags_is_smaller) { + Adds(cond, rd, rn, operand); + } else { + bool changed_op_is_smaller = + operand.IsImmediate() && (operand.GetSignedImmediate() < 0) && + ((rd.IsLow() && rn.IsLow() && + (operand.GetSignedImmediate() >= -7)) || + (rd.IsLow() && rn.Is(rd) && + (operand.GetSignedImmediate() >= -255))); + if (changed_op_is_smaller) { + Subs(cond, rd, rn, -operand.GetSignedImmediate()); + } else { + Add(cond, rd, rn, operand); + } + } + break; + } + } + void Add(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Add(flags, al, rd, rn, operand); + } + + void Adds(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + adds(cond, rd, rn, operand); + } + void Adds(Register rd, Register rn, const Operand& operand) { + Adds(al, rd, rn, operand); + } + + void And(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (rd.Is(rn) && operand.IsPlainRegister() && + rd.Is(operand.GetBaseRegister())) { + return; + } + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + mov(rd, 0); + return; + } + if ((immediate == 0xffffffff) && rd.Is(rn)) { + return; + } + } + bool can_use_it = + // AND{} {,} , ; T1 + operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + 
and_(cond, rd, rn, operand); + } + void And(Register rd, Register rn, const Operand& operand) { + And(al, rd, rn, operand); + } + void And(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + And(cond, rd, rn, operand); + break; + case SetFlags: + Ands(cond, rd, rn, operand); + break; + case DontCare: + if (operand.IsPlainRegister() && rd.Is(rn) && + rd.Is(operand.GetBaseRegister())) { + return; + } + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Ands(cond, rd, rn, operand); + } else { + And(cond, rd, rn, operand); + } + break; + } + } + void And(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + And(flags, al, rd, rn, operand); + } + + void Ands(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ands(cond, rd, rn, operand); + } + void Ands(Register rd, Register rn, const Operand& operand) { + Ands(al, rd, rn, operand); + } + + void Asr(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // ASR{} {,} , # ; T2 + (operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32) && rd.IsLow() && rm.IsLow()) || + // ASR{} {,} , ; T1 + 
(operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + asr(cond, rd, rm, operand); + } + void Asr(Register rd, Register rm, const Operand& operand) { + Asr(al, rd, rm, operand); + } + void Asr(FlagsUpdate flags, + Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Asr(cond, rd, rm, operand); + break; + case SetFlags: + Asrs(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && rd.IsLow() && rm.IsLow() && + ((operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32)) || + (operand.IsPlainRegister() && rd.Is(rm))); + if (setflags_is_smaller) { + Asrs(cond, rd, rm, operand); + } else { + Asr(cond, rd, rm, operand); + } + break; + } + } + void Asr(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Asr(flags, al, rd, rm, operand); + } + + void Asrs(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + asrs(cond, rd, rm, operand); + } + void Asrs(Register rd, Register rm, const Operand& operand) { + Asrs(al, rd, rm, operand); + } + + void B(Condition cond, Label* label, BranchHint hint = kBranchWithoutHint) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + EncodingSize size = Best; + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + if (hint == kNear) size = Narrow; + const ReferenceInfo* info; + bool can_encode = b_info(cond, size, label, &info); + VIXL_CHECK(can_encode); + 
CheckEmitPoolForInstruction(info, label, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + b(cond, size, label); + RegisterForwardReference(label); + } + void B(Label* label, BranchHint hint = kBranchWithoutHint) { + B(al, label, hint); + } + void BPreferNear(Condition cond, Label* label) { B(cond, label, kNear); } + void BPreferNear(Label* label) { B(al, label, kNear); } + + void Bfc(Condition cond, Register rd, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bfc(cond, rd, lsb, width); + } + void Bfc(Register rd, uint32_t lsb, uint32_t width) { + Bfc(al, rd, lsb, width); + } + + void Bfi( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bfi(cond, rd, rn, lsb, width); + } + void Bfi(Register rd, Register rn, uint32_t lsb, uint32_t width) { + Bfi(al, rd, rn, lsb, width); + } + + void Bic(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if ((immediate == 0) && rd.Is(rn)) { + return; + } + if (immediate == 0xffffffff) { + mov(rd, 0); + return; + } + } + bool can_use_it = + // BIC{} {,} , ; T1 + 
operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + bic(cond, rd, rn, operand); + } + void Bic(Register rd, Register rn, const Operand& operand) { + Bic(al, rd, rn, operand); + } + void Bic(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Bic(cond, rd, rn, operand); + break; + case SetFlags: + Bics(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Bics(cond, rd, rn, operand); + } else { + Bic(cond, rd, rn, operand); + } + break; + } + } + void Bic(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Bic(flags, al, rd, rn, operand); + } + + void Bics(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bics(cond, rd, rn, operand); + } + void Bics(Register rd, Register rn, const Operand& operand) { + Bics(al, rd, rn, operand); + } + + void Bkpt(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bkpt(cond, imm); + } + void Bkpt(uint32_t imm) { Bkpt(al, imm); } + + void Bl(Condition cond, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* 
info; + bool can_encode = bl_info(cond, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + bl(cond, label); + RegisterForwardReference(label); + } + void Bl(Label* label) { Bl(al, label); } + + void Blx(Condition cond, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* info; + bool can_encode = blx_info(cond, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label, &cond); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + ITScope it_scope(this, &cond, guard); + blx(cond, label); + RegisterForwardReference(label); + } + void Blx(Label* label) { Blx(al, label); } + + void Blx(Condition cond, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // BLX{}{} ; T1 + !rm.IsPC(); + ITScope it_scope(this, &cond, guard, can_use_it); + blx(cond, rm); + } + void Blx(Register rm) { Blx(al, rm); } + + void Bx(Condition cond, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // BX{}{} ; T1 + !rm.IsPC(); + ITScope it_scope(this, &cond, guard, can_use_it); + bx(cond, rm); + } + void Bx(Register rm) { Bx(al, rm); } + + void Bxj(Condition cond, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + bxj(cond, rm); + } + void Bxj(Register rm) { Bxj(al, rm); } + + void Cbnz(Register rn, Label* label) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* info; + bool can_encode = cbnz_info(rn, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label); + // We have already checked for pool emission. + pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + cbnz(rn, label); + RegisterForwardReference(label); + } + + void Cbz(Register rn, Label* label) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope::PoolPolicy pool_policy = + MacroEmissionCheckScope::kBlockPools; + if (!label->IsBound()) { + const ReferenceInfo* info; + bool can_encode = cbz_info(rn, label, &info); + VIXL_CHECK(can_encode); + CheckEmitPoolForInstruction(info, label); + // We have already checked for pool emission. 
+ pool_policy = MacroEmissionCheckScope::kIgnorePools; + } + MacroEmissionCheckScope guard(this, pool_policy); + cbz(rn, label); + RegisterForwardReference(label); + } + + void Clrex(Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + clrex(cond); + } + void Clrex() { Clrex(al); } + + void Clz(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + clz(cond, rd, rm); + } + void Clz(Register rd, Register rm) { Clz(al, rd, rm); } + + void Cmn(Condition cond, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // CMN{}{} , ; T1 + operand.IsPlainRegister() && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + cmn(cond, rn, operand); + } + void Cmn(Register rn, const Operand& operand) { Cmn(al, rn, operand); } + + void Cmp(Condition cond, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // CMP{}{} , # ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 255) && + rn.IsLow()) || + // CMP{}{} , ; T1 T2 + (operand.IsPlainRegister() && !rn.IsPC() && + !operand.GetBaseRegister().IsPC()); + ITScope it_scope(this, &cond, guard, can_use_it); + cmp(cond, rn, operand); + } + void Cmp(Register 
rn, const Operand& operand) { Cmp(al, rn, operand); } + + void Crc32b(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32b(cond, rd, rn, rm); + } + void Crc32b(Register rd, Register rn, Register rm) { Crc32b(al, rd, rn, rm); } + + void Crc32cb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32cb(cond, rd, rn, rm); + } + void Crc32cb(Register rd, Register rn, Register rm) { + Crc32cb(al, rd, rn, rm); + } + + void Crc32ch(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32ch(cond, rd, rn, rm); + } + void Crc32ch(Register rd, Register rn, Register rm) { + Crc32ch(al, rd, rn, rm); + } + + void Crc32cw(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32cw(cond, rd, rn, rm); + } + void 
Crc32cw(Register rd, Register rn, Register rm) { + Crc32cw(al, rd, rn, rm); + } + + void Crc32h(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32h(cond, rd, rn, rm); + } + void Crc32h(Register rd, Register rn, Register rm) { Crc32h(al, rd, rn, rm); } + + void Crc32w(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + crc32w(cond, rd, rn, rm); + } + void Crc32w(Register rd, Register rn, Register rm) { Crc32w(al, rd, rn, rm); } + + void Dmb(Condition cond, MemoryBarrier option) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + dmb(cond, option); + } + void Dmb(MemoryBarrier option) { Dmb(al, option); } + + void Dsb(Condition cond, MemoryBarrier option) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + dsb(cond, option); + } + void Dsb(MemoryBarrier option) { Dsb(al, option); } + + void Eor(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope 
guard(this); + if (cond.Is(al) && rd.Is(rn) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + return; + } + if (immediate == 0xffffffff) { + mvn(rd, rn); + return; + } + } + bool can_use_it = + // EOR{} {,} , ; T1 + operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + eor(cond, rd, rn, operand); + } + void Eor(Register rd, Register rn, const Operand& operand) { + Eor(al, rd, rn, operand); + } + void Eor(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Eor(cond, rd, rn, operand); + break; + case SetFlags: + Eors(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Eors(cond, rd, rn, operand); + } else { + Eor(cond, rd, rn, operand); + } + break; + } + } + void Eor(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Eor(flags, al, rd, rn, operand); + } + + void Eors(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + eors(cond, rd, rn, operand); + } + void Eors(Register rd, Register rn, const Operand& operand) { + Eors(al, rd, rn, operand); + } + + void Fldmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fldmdbx(cond, rn, write_back, dreglist); + } + void Fldmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fldmdbx(al, rn, write_back, dreglist); + } + + void Fldmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fldmiax(cond, rn, write_back, dreglist); + } + void Fldmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fldmiax(al, rn, write_back, dreglist); + } + + void Fstmdbx(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fstmdbx(cond, rn, write_back, dreglist); + } + void Fstmdbx(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fstmdbx(al, rn, write_back, dreglist); + } + + void Fstmiax(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + fstmiax(cond, rn, write_back, dreglist); + } + void Fstmiax(Register rn, WriteBack write_back, DRegisterList dreglist) { + Fstmiax(al, rn, write_back, dreglist); + } + + void Hlt(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); 
+ MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + hlt(cond, imm); + } + void Hlt(uint32_t imm) { Hlt(al, imm); } + + void Hvc(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + hvc(cond, imm); + } + void Hvc(uint32_t imm) { Hvc(al, imm); } + + void Isb(Condition cond, MemoryBarrier option) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + isb(cond, option); + } + void Isb(MemoryBarrier option) { Isb(al, option); } + + void Lda(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + lda(cond, rt, operand); + } + void Lda(Register rt, const MemOperand& operand) { Lda(al, rt, operand); } + + void Ldab(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldab(cond, rt, operand); + } + void Ldab(Register rt, const MemOperand& operand) { Ldab(al, rt, operand); } + + void Ldaex(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaex(cond, rt, operand); + } + void Ldaex(Register rt, const MemOperand& 
operand) { Ldaex(al, rt, operand); } + + void Ldaexb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaexb(cond, rt, operand); + } + void Ldaexb(Register rt, const MemOperand& operand) { + Ldaexb(al, rt, operand); + } + + void Ldaexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaexd(cond, rt, rt2, operand); + } + void Ldaexd(Register rt, Register rt2, const MemOperand& operand) { + Ldaexd(al, rt, rt2, operand); + } + + void Ldaexh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldaexh(cond, rt, operand); + } + void Ldaexh(Register rt, const MemOperand& operand) { + Ldaexh(al, rt, operand); + } + + void Ldah(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldah(cond, rt, operand); + } + void Ldah(Register rt, const MemOperand& operand) { Ldah(al, rt, operand); } + + void Ldm(Condition cond, + Register 
rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldm(cond, rn, write_back, registers); + } + void Ldm(Register rn, WriteBack write_back, RegisterList registers) { + Ldm(al, rn, write_back, registers); + } + + void Ldmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmda(cond, rn, write_back, registers); + } + void Ldmda(Register rn, WriteBack write_back, RegisterList registers) { + Ldmda(al, rn, write_back, registers); + } + + void Ldmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmdb(cond, rn, write_back, registers); + } + void Ldmdb(Register rn, WriteBack write_back, RegisterList registers) { + Ldmdb(al, rn, write_back, registers); + } + + void Ldmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmea(cond, rn, write_back, registers); + } + void Ldmea(Register rn, WriteBack write_back, 
RegisterList registers) { + Ldmea(al, rn, write_back, registers); + } + + void Ldmed(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmed(cond, rn, write_back, registers); + } + void Ldmed(Register rn, WriteBack write_back, RegisterList registers) { + Ldmed(al, rn, write_back, registers); + } + + void Ldmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmfa(cond, rn, write_back, registers); + } + void Ldmfa(Register rn, WriteBack write_back, RegisterList registers) { + Ldmfa(al, rn, write_back, registers); + } + + void Ldmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldmfd(cond, rn, write_back, registers); + } + void Ldmfd(Register rn, WriteBack write_back, RegisterList registers) { + Ldmfd(al, rn, write_back, registers); + } + + void Ldmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope 
it_scope(this, &cond, guard); + ldmib(cond, rn, write_back, registers); + } + void Ldmib(Register rn, WriteBack write_back, RegisterList registers) { + Ldmib(al, rn, write_back, registers); + } + + void Ldr(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDR{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 124, 4) && + (operand.GetAddrMode() == Offset)) || + // LDR{}{} , [SP{, #{+}}] ; T2 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsSP() && + operand.IsOffsetImmediateWithinRange(0, 1020, 4) && + (operand.GetAddrMode() == Offset)) || + // LDR{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + ldr(cond, rt, operand); + } + void Ldr(Register rt, const MemOperand& operand) { Ldr(al, rt, operand); } + + + void Ldrb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRB{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 31) && + (operand.GetAddrMode() == Offset)) || + // LDRB{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && 
operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrb(cond, rt, operand); + } + void Ldrb(Register rt, const MemOperand& operand) { Ldrb(al, rt, operand); } + + + void Ldrd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrd(cond, rt, rt2, operand); + } + void Ldrd(Register rt, Register rt2, const MemOperand& operand) { + Ldrd(al, rt, rt2, operand); + } + + + void Ldrex(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrex(cond, rt, operand); + } + void Ldrex(Register rt, const MemOperand& operand) { Ldrex(al, rt, operand); } + + void Ldrexb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrexb(cond, rt, operand); + } + void Ldrexb(Register rt, const MemOperand& operand) { + Ldrexb(al, rt, operand); + } + + void Ldrexd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrexd(cond, rt, rt2, operand); + } + void Ldrexd(Register rt, Register rt2, const MemOperand& operand) { + Ldrexd(al, rt, rt2, operand); + } + + void Ldrexh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ldrexh(cond, rt, operand); + } + void Ldrexh(Register rt, const MemOperand& operand) { + Ldrexh(al, rt, operand); + } + + void Ldrh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRH{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 62, 2) && + (operand.GetAddrMode() == Offset)) || + // LDRH{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrh(cond, rt, operand); + } + void Ldrh(Register rt, const MemOperand& operand) { Ldrh(al, rt, operand); } + + + void Ldrsb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRSB{}{} , [, {+}] ; T1 + operand.IsPlainRegister() 
&& rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrsb(cond, rt, operand); + } + void Ldrsb(Register rt, const MemOperand& operand) { Ldrsb(al, rt, operand); } + + + void Ldrsh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LDRSH{}{} , [, {+}] ; T1 + operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset); + ITScope it_scope(this, &cond, guard, can_use_it); + ldrsh(cond, rt, operand); + } + void Ldrsh(Register rt, const MemOperand& operand) { Ldrsh(al, rt, operand); } + + + void Lsl(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LSL{} {,} , # ; T2 + (operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 31) && rd.IsLow() && rm.IsLow()) || + // LSL{} {,} , ; T1 + (operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + lsl(cond, rd, rm, operand); + } + void Lsl(Register rd, Register rm, const Operand& operand) { + Lsl(al, rd, rm, operand); + } + void Lsl(FlagsUpdate flags, + Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case 
LeaveFlags: + Lsl(cond, rd, rm, operand); + break; + case SetFlags: + Lsls(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && rd.IsLow() && rm.IsLow() && + ((operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() < 32)) || + (operand.IsPlainRegister() && rd.Is(rm))); + if (setflags_is_smaller) { + Lsls(cond, rd, rm, operand); + } else { + Lsl(cond, rd, rm, operand); + } + break; + } + } + void Lsl(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Lsl(flags, al, rd, rm, operand); + } + + void Lsls(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + lsls(cond, rd, rm, operand); + } + void Lsls(Register rd, Register rm, const Operand& operand) { + Lsls(al, rd, rm, operand); + } + + void Lsr(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // LSR{} {,} , # ; T2 + (operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32) && rd.IsLow() && rm.IsLow()) || + // LSR{} {,} , ; T1 + (operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + lsr(cond, rd, rm, operand); + } + void Lsr(Register rd, Register rm, const Operand& operand) { + Lsr(al, rd, rm, operand); + } + void Lsr(FlagsUpdate flags, + 
Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Lsr(cond, rd, rm, operand); + break; + case SetFlags: + Lsrs(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && rd.IsLow() && rm.IsLow() && + ((operand.IsImmediate() && (operand.GetImmediate() >= 1) && + (operand.GetImmediate() <= 32)) || + (operand.IsPlainRegister() && rd.Is(rm))); + if (setflags_is_smaller) { + Lsrs(cond, rd, rm, operand); + } else { + Lsr(cond, rd, rm, operand); + } + break; + } + } + void Lsr(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Lsr(flags, al, rd, rm, operand); + } + + void Lsrs(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + lsrs(cond, rd, rm, operand); + } + void Lsrs(Register rd, Register rm, const Operand& operand) { + Lsrs(al, rd, rm, operand); + } + + void Mla(Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mla(cond, rd, rn, rm, ra); + } + void Mla(Register rd, Register rn, Register rm, Register ra) { + Mla(al, rd, rn, rm, ra); + } + void Mla(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + Register rm, + Register ra) { + switch (flags) { + case LeaveFlags: + Mla(cond, rd, rn, rm, ra); + 
break; + case SetFlags: + Mlas(cond, rd, rn, rm, ra); + break; + case DontCare: + Mla(cond, rd, rn, rm, ra); + break; + } + } + void Mla( + FlagsUpdate flags, Register rd, Register rn, Register rm, Register ra) { + Mla(flags, al, rd, rn, rm, ra); + } + + void Mlas( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mlas(cond, rd, rn, rm, ra); + } + void Mlas(Register rd, Register rn, Register rm, Register ra) { + Mlas(al, rd, rn, rm, ra); + } + + void Mls(Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mls(cond, rd, rn, rm, ra); + } + void Mls(Register rd, Register rn, Register rm, Register ra) { + Mls(al, rd, rn, rm, ra); + } + + void Mov(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (operand.IsPlainRegister() && rd.Is(operand.GetBaseRegister())) { + return; + } + bool can_use_it = + // MOV{} , # ; T1 + (operand.IsImmediate() && rd.IsLow() && + (operand.GetImmediate() <= 255)) || + // MOV{}{} , ; T1 + (operand.IsPlainRegister() && !rd.IsPC() && + 
!operand.GetBaseRegister().IsPC()) || + // MOV{} , {, #} ; T2 + (operand.IsImmediateShiftedRegister() && rd.IsLow() && + operand.GetBaseRegister().IsLow() && + (operand.GetShift().Is(LSL) || operand.GetShift().Is(LSR) || + operand.GetShift().Is(ASR))) || + // MOV{} , , LSL ; T1 + // MOV{} , , LSR ; T1 + // MOV{} , , ASR ; T1 + // MOV{} , , ROR ; T1 + (operand.IsRegisterShiftedRegister() && + rd.Is(operand.GetBaseRegister()) && rd.IsLow() && + (operand.GetShift().Is(LSL) || operand.GetShift().Is(LSR) || + operand.GetShift().Is(ASR) || operand.GetShift().Is(ROR)) && + operand.GetShiftRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + mov(cond, rd, operand); + } + void Mov(Register rd, const Operand& operand) { Mov(al, rd, operand); } + void Mov(FlagsUpdate flags, + Condition cond, + Register rd, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Mov(cond, rd, operand); + break; + case SetFlags: + Movs(cond, rd, operand); + break; + case DontCare: + if (operand.IsPlainRegister() && rd.Is(operand.GetBaseRegister())) { + return; + } + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && + ((operand.IsImmediateShiftedRegister() && rd.IsLow() && + operand.GetBaseRegister().IsLow() && + (operand.GetShiftAmount() >= 1) && + (((operand.GetShiftAmount() <= 32) && + ((operand.GetShift().IsLSR() || operand.GetShift().IsASR()))) || + ((operand.GetShiftAmount() < 32) && + operand.GetShift().IsLSL()))) || + (operand.IsRegisterShiftedRegister() && rd.IsLow() && + operand.GetBaseRegister().Is(rd) && + operand.GetShiftRegister().IsLow() && + (operand.GetShift().IsLSL() || operand.GetShift().IsLSR() || + operand.GetShift().IsASR() || operand.GetShift().IsROR())) || + (operand.IsImmediate() && rd.IsLow() && + (operand.GetImmediate() < 256))); + if (setflags_is_smaller) { + Movs(cond, rd, operand); + } else { + Mov(cond, rd, operand); + } + break; + } + } + void Mov(FlagsUpdate flags, Register rd, const Operand& operand) { + Mov(flags, al, 
rd, operand); + } + + void Movs(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + movs(cond, rd, operand); + } + void Movs(Register rd, const Operand& operand) { Movs(al, rd, operand); } + + void Movt(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + movt(cond, rd, operand); + } + void Movt(Register rd, const Operand& operand) { Movt(al, rd, operand); } + + void Mrs(Condition cond, Register rd, SpecialRegister spec_reg) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mrs(cond, rd, spec_reg); + } + void Mrs(Register rd, SpecialRegister spec_reg) { Mrs(al, rd, spec_reg); } + + void Msr(Condition cond, + MaskedSpecialRegister spec_reg, + const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + msr(cond, spec_reg, operand); + } + void Msr(MaskedSpecialRegister spec_reg, const Operand& operand) { + Msr(al, spec_reg, operand); + } + + void Mul(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // MUL{} , {, } ; T1 + rd.Is(rm) && rn.IsLow() && rm.IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + mul(cond, rd, rn, rm); + } + void Mul(Register rd, Register rn, Register rm) { Mul(al, rd, rn, rm); } + void Mul(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Mul(cond, rd, rn, rm); + break; + case SetFlags: + Muls(cond, rd, rn, rm); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.IsLow() && rm.Is(rd); + if (setflags_is_smaller) { + Muls(cond, rd, rn, rm); + } else { + Mul(cond, rd, rn, rm); + } + break; + } + } + void Mul(FlagsUpdate flags, Register rd, Register rn, Register rm) { + Mul(flags, al, rd, rn, rm); + } + + void Muls(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + muls(cond, rd, rn, rm); + } + void Muls(Register rd, Register rn, Register rm) { Muls(al, rd, rn, rm); } + + void Mvn(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // MVN{} , ; T1 + operand.IsPlainRegister() && rd.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + mvn(cond, rd, operand); + } + void Mvn(Register rd, const Operand& operand) { Mvn(al, rd, operand); } + void Mvn(FlagsUpdate flags, + Condition cond, + Register rd, + const 
Operand& operand) { + switch (flags) { + case LeaveFlags: + Mvn(cond, rd, operand); + break; + case SetFlags: + Mvns(cond, rd, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Mvns(cond, rd, operand); + } else { + Mvn(cond, rd, operand); + } + break; + } + } + void Mvn(FlagsUpdate flags, Register rd, const Operand& operand) { + Mvn(flags, al, rd, operand); + } + + void Mvns(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + mvns(cond, rd, operand); + } + void Mvns(Register rd, const Operand& operand) { Mvns(al, rd, operand); } + + void Nop(Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + nop(cond); + } + void Nop() { Nop(al); } + + void Orn(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + mvn(rd, 0); + return; + } + if ((immediate == 0xffffffff) && rd.Is(rn)) { + return; + } + } + ITScope it_scope(this, &cond, guard); + orn(cond, rd, rn, operand); + } + void Orn(Register rd, Register rn, const Operand& operand) { + Orn(al, rd, rn, operand); + } + void Orn(FlagsUpdate flags, + Condition cond, + Register rd, + 
Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Orn(cond, rd, rn, operand); + break; + case SetFlags: + Orns(cond, rd, rn, operand); + break; + case DontCare: + Orn(cond, rd, rn, operand); + break; + } + } + void Orn(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Orn(flags, al, rd, rn, operand); + } + + void Orns(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + orns(cond, rd, rn, operand); + } + void Orns(Register rd, Register rn, const Operand& operand) { + Orns(al, rd, rn, operand); + } + + void Orr(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (rd.Is(rn) && operand.IsPlainRegister() && + rd.Is(operand.GetBaseRegister())) { + return; + } + if (cond.Is(al) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if ((immediate == 0) && rd.Is(rn)) { + return; + } + if (immediate == 0xffffffff) { + mvn(rd, 0); + return; + } + } + bool can_use_it = + // ORR{} {,} , ; T1 + operand.IsPlainRegister() && rd.Is(rn) && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + orr(cond, rd, rn, operand); + } + void Orr(Register rd, Register rn, const Operand& operand) { + Orr(al, rd, rn, operand); + } + void Orr(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + 
switch (flags) { + case LeaveFlags: + Orr(cond, rd, rn, operand); + break; + case SetFlags: + Orrs(cond, rd, rn, operand); + break; + case DontCare: + if (operand.IsPlainRegister() && rd.Is(rn) && + rd.Is(operand.GetBaseRegister())) { + return; + } + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Orrs(cond, rd, rn, operand); + } else { + Orr(cond, rd, rn, operand); + } + break; + } + } + void Orr(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Orr(flags, al, rd, rn, operand); + } + + void Orrs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + orrs(cond, rd, rn, operand); + } + void Orrs(Register rd, Register rn, const Operand& operand) { + Orrs(al, rd, rn, operand); + } + + void Pkhbt(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pkhbt(cond, rd, rn, operand); + } + void Pkhbt(Register rd, Register rn, const Operand& operand) { + Pkhbt(al, rd, rn, operand); + } + + void Pkhtb(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pkhtb(cond, rd, rn, operand); + } + void Pkhtb(Register rd, Register rn, const Operand& operand) { + Pkhtb(al, rd, rn, operand); + } + + + void Pld(Condition cond, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pld(cond, operand); + } + void Pld(const MemOperand& operand) { Pld(al, operand); } + + void Pldw(Condition cond, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pldw(cond, operand); + } + void Pldw(const MemOperand& operand) { Pldw(al, operand); } + + void Pli(Condition cond, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pli(cond, operand); + } + void Pli(const MemOperand& operand) { Pli(al, operand); } + + + void Pop(Condition cond, RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pop(cond, registers); + } + void Pop(RegisterList registers) { Pop(al, registers); } + + void Pop(Condition cond, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + pop(cond, rt); + } + void 
Pop(Register rt) { Pop(al, rt); } + + void Push(Condition cond, RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + push(cond, registers); + } + void Push(RegisterList registers) { Push(al, registers); } + + void Push(Condition cond, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + push(cond, rt); + } + void Push(Register rt) { Push(al, rt); } + + void Qadd(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qadd(cond, rd, rm, rn); + } + void Qadd(Register rd, Register rm, Register rn) { Qadd(al, rd, rm, rn); } + + void Qadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qadd16(cond, rd, rn, rm); + } + void Qadd16(Register rd, Register rn, Register rm) { Qadd16(al, rd, rn, rm); } + + void Qadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qadd8(cond, rd, rn, rm); + } + void Qadd8(Register rd, Register rn, Register rm) { Qadd8(al, rd, rn, rm); } + + void Qasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qasx(cond, rd, rn, rm); + } + void Qasx(Register rd, Register rn, Register rm) { Qasx(al, rd, rn, rm); } + + void Qdadd(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qdadd(cond, rd, rm, rn); + } + void Qdadd(Register rd, Register rm, Register rn) { Qdadd(al, rd, rm, rn); } + + void Qdsub(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qdsub(cond, rd, rm, rn); + } + void Qdsub(Register rd, Register rm, Register rn) { Qdsub(al, rd, rm, rn); } + + void Qsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsax(cond, rd, rn, rm); + } + void Qsax(Register rd, Register rn, Register rm) { Qsax(al, rd, rn, rm); } + + void Qsub(Condition cond, Register rd, Register rm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsub(cond, rd, rm, rn); + } + void Qsub(Register rd, Register rm, Register rn) { Qsub(al, rd, rm, rn); } + + void Qsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsub16(cond, rd, rn, rm); + } + void Qsub16(Register rd, Register rn, Register rm) { Qsub16(al, rd, rn, rm); } + + void Qsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + qsub8(cond, rd, rn, rm); + } + void Qsub8(Register rd, Register rn, Register rm) { Qsub8(al, rd, rn, rm); } + + void Rbit(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope 
it_scope(this, &cond, guard); + rbit(cond, rd, rm); + } + void Rbit(Register rd, Register rm) { Rbit(al, rd, rm); } + + void Rev(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rev(cond, rd, rm); + } + void Rev(Register rd, Register rm) { Rev(al, rd, rm); } + + void Rev16(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rev16(cond, rd, rm); + } + void Rev16(Register rd, Register rm) { Rev16(al, rd, rm); } + + void Revsh(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + revsh(cond, rd, rm); + } + void Revsh(Register rd, Register rm) { Revsh(al, rd, rm); } + + void Ror(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // ROR{} {,} , ; T1 + operand.IsPlainRegister() && rd.Is(rm) && rd.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + ror(cond, rd, rm, operand); + } + void Ror(Register rd, Register rm, const Operand& operand) { + Ror(al, rd, rm, 
operand); + } + void Ror(FlagsUpdate flags, + Condition cond, + Register rd, + Register rm, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Ror(cond, rd, rm, operand); + break; + case SetFlags: + Rors(cond, rd, rm, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rm.IsLow() && operand.IsPlainRegister() && + rd.Is(rm); + if (setflags_is_smaller) { + Rors(cond, rd, rm, operand); + } else { + Ror(cond, rd, rm, operand); + } + break; + } + } + void Ror(FlagsUpdate flags, + Register rd, + Register rm, + const Operand& operand) { + Ror(flags, al, rd, rm, operand); + } + + void Rors(Condition cond, Register rd, Register rm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rors(cond, rd, rm, operand); + } + void Rors(Register rd, Register rm, const Operand& operand) { + Rors(al, rd, rm, operand); + } + + void Rrx(Condition cond, Register rd, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rrx(cond, rd, rm); + } + void Rrx(Register rd, Register rm) { Rrx(al, rd, rm); } + void Rrx(FlagsUpdate flags, Condition cond, Register rd, Register rm) { + switch (flags) { + case LeaveFlags: + Rrx(cond, rd, rm); + break; + case SetFlags: + Rrxs(cond, rd, rm); + break; + case DontCare: + Rrx(cond, rd, rm); + break; + } + } + void Rrx(FlagsUpdate flags, Register rd, Register rm) { + Rrx(flags, al, rd, rm); + } + + void Rrxs(Condition cond, Register rd, Register rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rrxs(cond, rd, rm); + } + void Rrxs(Register rd, Register rm) { Rrxs(al, rd, rm); } + + void Rsb(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // RSB{} {, }, #0 ; T1 + operand.IsImmediate() && rd.IsLow() && rn.IsLow() && + (operand.GetImmediate() == 0); + ITScope it_scope(this, &cond, guard, can_use_it); + rsb(cond, rd, rn, operand); + } + void Rsb(Register rd, Register rn, const Operand& operand) { + Rsb(al, rd, rn, operand); + } + void Rsb(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Rsb(cond, rd, rn, operand); + break; + case SetFlags: + Rsbs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.IsLow() && operand.IsImmediate() && + (operand.GetImmediate() == 0); + if (setflags_is_smaller) { + Rsbs(cond, rd, rn, operand); + } else { + Rsb(cond, rd, rn, operand); + } + break; + } + } + void Rsb(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Rsb(flags, al, rd, rn, operand); + } + + void Rsbs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rsbs(cond, rd, rn, operand); + } + void Rsbs(Register rd, Register rn, const Operand& operand) { + Rsbs(al, rd, rn, operand); + } + + void Rsc(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rsc(cond, rd, rn, operand); + } + void Rsc(Register rd, Register rn, const Operand& operand) { + Rsc(al, rd, rn, operand); + } + void Rsc(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Rsc(cond, rd, rn, operand); + break; + case SetFlags: + Rscs(cond, rd, rn, operand); + break; + case DontCare: + Rsc(cond, rd, rn, operand); + break; + } + } + void Rsc(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Rsc(flags, al, rd, rn, operand); + } + + void Rscs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + rscs(cond, rd, rn, operand); + } + void Rscs(Register rd, Register rn, const Operand& operand) { + Rscs(al, rd, rn, operand); + } + + void Sadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sadd16(cond, rd, rn, rm); + } + void Sadd16(Register rd, Register rn, Register rm) { Sadd16(al, rd, rn, rm); } + + void Sadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sadd8(cond, rd, rn, rm); + } + void Sadd8(Register rd, Register rn, Register rm) { Sadd8(al, rd, rn, rm); } + + void Sasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sasx(cond, rd, rn, rm); + } + void Sasx(Register rd, Register rn, Register rm) { Sasx(al, rd, rn, rm); } + + void Sbc(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // SBC{} {,} , ; T1 + operand.IsPlainRegister() && rn.IsLow() && rd.Is(rn) && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + sbc(cond, rd, rn, operand); + } + void Sbc(Register rd, Register rn, const Operand& operand) { + Sbc(al, rd, rn, operand); + } + void Sbc(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + 
switch (flags) { + case LeaveFlags: + Sbc(cond, rd, rn, operand); + break; + case SetFlags: + Sbcs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = IsUsingT32() && cond.Is(al) && rd.IsLow() && + rn.Is(rd) && operand.IsPlainRegister() && + operand.GetBaseRegister().IsLow(); + if (setflags_is_smaller) { + Sbcs(cond, rd, rn, operand); + } else { + Sbc(cond, rd, rn, operand); + } + break; + } + } + void Sbc(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Sbc(flags, al, rd, rn, operand); + } + + void Sbcs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sbcs(cond, rd, rn, operand); + } + void Sbcs(Register rd, Register rn, const Operand& operand) { + Sbcs(al, rd, rn, operand); + } + + void Sbfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sbfx(cond, rd, rn, lsb, width); + } + void Sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width) { + Sbfx(al, rd, rn, lsb, width); + } + + void Sdiv(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sdiv(cond, rd, rn, rm); + } + 
void Sdiv(Register rd, Register rn, Register rm) { Sdiv(al, rd, rn, rm); } + + void Sel(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sel(cond, rd, rn, rm); + } + void Sel(Register rd, Register rn, Register rm) { Sel(al, rd, rn, rm); } + + void Shadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shadd16(cond, rd, rn, rm); + } + void Shadd16(Register rd, Register rn, Register rm) { + Shadd16(al, rd, rn, rm); + } + + void Shadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shadd8(cond, rd, rn, rm); + } + void Shadd8(Register rd, Register rn, Register rm) { Shadd8(al, rd, rn, rm); } + + void Shasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shasx(cond, rd, rn, rm); + } + void 
Shasx(Register rd, Register rn, Register rm) { Shasx(al, rd, rn, rm); } + + void Shsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shsax(cond, rd, rn, rm); + } + void Shsax(Register rd, Register rn, Register rm) { Shsax(al, rd, rn, rm); } + + void Shsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shsub16(cond, rd, rn, rm); + } + void Shsub16(Register rd, Register rn, Register rm) { + Shsub16(al, rd, rn, rm); + } + + void Shsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + shsub8(cond, rd, rn, rm); + } + void Shsub8(Register rd, Register rn, Register rm) { Shsub8(al, rd, rn, rm); } + + void Smlabb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope 
it_scope(this, &cond, guard); + smlabb(cond, rd, rn, rm, ra); + } + void Smlabb(Register rd, Register rn, Register rm, Register ra) { + Smlabb(al, rd, rn, rm, ra); + } + + void Smlabt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlabt(cond, rd, rn, rm, ra); + } + void Smlabt(Register rd, Register rn, Register rm, Register ra) { + Smlabt(al, rd, rn, rm, ra); + } + + void Smlad( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlad(cond, rd, rn, rm, ra); + } + void Smlad(Register rd, Register rn, Register rm, Register ra) { + Smlad(al, rd, rn, rm, ra); + } + + void Smladx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smladx(cond, rd, rn, rm, ra); + } + void Smladx(Register rd, Register rn, Register rm, Register ra) { + Smladx(al, rd, rn, rm, ra); + } + + void Smlal( + Condition cond, Register rdlo, 
Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlal(cond, rdlo, rdhi, rn, rm); + } + void Smlal(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlal(al, rdlo, rdhi, rn, rm); + } + + void Smlalbb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlalbb(cond, rdlo, rdhi, rn, rm); + } + void Smlalbb(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlalbb(al, rdlo, rdhi, rn, rm); + } + + void Smlalbt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlalbt(cond, rdlo, rdhi, rn, rm); + } + void Smlalbt(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlalbt(al, rdlo, rdhi, rn, rm); + } + + void Smlald( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlald(cond, rdlo, rdhi, rn, rm); + } + void Smlald(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlald(al, rdlo, rdhi, rn, rm); + } + + void Smlaldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlaldx(cond, rdlo, rdhi, rn, rm); + } + void Smlaldx(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlaldx(al, rdlo, rdhi, rn, rm); + } + + void Smlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlals(cond, rdlo, rdhi, rn, rm); + } + void Smlals(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlals(al, rdlo, rdhi, rn, rm); + } + + void Smlaltb( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlaltb(cond, rdlo, rdhi, rn, rm); + } + void Smlaltb(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlaltb(al, rdlo, rdhi, rn, rm); + } + + void Smlaltt( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlaltt(cond, rdlo, rdhi, rn, rm); + } + void Smlaltt(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlaltt(al, rdlo, rdhi, rn, rm); + } + + void Smlatb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlatb(cond, rd, rn, rm, ra); + } + void Smlatb(Register rd, Register rn, Register rm, Register ra) { + Smlatb(al, rd, rn, rm, ra); + } + + void Smlatt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlatt(cond, rd, rn, rm, ra); + } + void Smlatt(Register rd, Register 
rn, Register rm, Register ra) { + Smlatt(al, rd, rn, rm, ra); + } + + void Smlawb( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlawb(cond, rd, rn, rm, ra); + } + void Smlawb(Register rd, Register rn, Register rm, Register ra) { + Smlawb(al, rd, rn, rm, ra); + } + + void Smlawt( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlawt(cond, rd, rn, rm, ra); + } + void Smlawt(Register rd, Register rn, Register rm, Register ra) { + Smlawt(al, rd, rn, rm, ra); + } + + void Smlsd( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsd(cond, rd, rn, rm, ra); + } + void Smlsd(Register rd, Register rn, Register rm, Register ra) { + Smlsd(al, rd, rn, rm, ra); + } + + void Smlsdx( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsdx(cond, rd, rn, rm, ra); + } + void Smlsdx(Register rd, Register rn, Register rm, Register ra) { + Smlsdx(al, rd, rn, rm, ra); + } + + void Smlsld( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsld(cond, rdlo, rdhi, rn, rm); + } + void Smlsld(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlsld(al, rdlo, rdhi, rn, rm); + } + + void Smlsldx( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smlsldx(cond, rdlo, rdhi, rn, rm); + } + void Smlsldx(Register rdlo, Register rdhi, Register rn, Register rm) { + Smlsldx(al, rdlo, rdhi, rn, rm); + } + + void Smmla( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmla(cond, rd, rn, rm, ra); + } + void Smmla(Register rd, Register rn, Register rm, Register ra) { + Smmla(al, rd, rn, rm, ra); + } + + void Smmlar( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmlar(cond, rd, rn, rm, ra); + } + void Smmlar(Register rd, Register rn, Register rm, Register ra) { + Smmlar(al, rd, rn, rm, ra); + } + + void Smmls( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmls(cond, rd, rn, rm, ra); + } + void Smmls(Register rd, Register rn, Register rm, Register ra) { + Smmls(al, rd, rn, rm, ra); + } + + void Smmlsr( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmlsr(cond, rd, rn, rm, ra); + } + void Smmlsr(Register rd, Register rn, 
Register rm, Register ra) { + Smmlsr(al, rd, rn, rm, ra); + } + + void Smmul(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmul(cond, rd, rn, rm); + } + void Smmul(Register rd, Register rn, Register rm) { Smmul(al, rd, rn, rm); } + + void Smmulr(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smmulr(cond, rd, rn, rm); + } + void Smmulr(Register rd, Register rn, Register rm) { Smmulr(al, rd, rn, rm); } + + void Smuad(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smuad(cond, rd, rn, rm); + } + void Smuad(Register rd, Register rn, Register rm) { Smuad(al, rd, rn, rm); } + + void Smuadx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smuadx(cond, rd, rn, rm); + } + void Smuadx(Register rd, 
Register rn, Register rm) { Smuadx(al, rd, rn, rm); } + + void Smulbb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulbb(cond, rd, rn, rm); + } + void Smulbb(Register rd, Register rn, Register rm) { Smulbb(al, rd, rn, rm); } + + void Smulbt(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulbt(cond, rd, rn, rm); + } + void Smulbt(Register rd, Register rn, Register rm) { Smulbt(al, rd, rn, rm); } + + void Smull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smull(cond, rdlo, rdhi, rn, rm); + } + void Smull(Register rdlo, Register rdhi, Register rn, Register rm) { + Smull(al, rdlo, rdhi, rn, rm); + } + void Smull(FlagsUpdate flags, + Condition cond, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Smull(cond, rdlo, rdhi, rn, rm); + break; + case SetFlags: + Smulls(cond, rdlo, rdhi, rn, rm); + break; + case DontCare: + Smull(cond, rdlo, rdhi, rn, rm); + break; + } + } + void 
Smull(FlagsUpdate flags, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + Smull(flags, al, rdlo, rdhi, rn, rm); + } + + void Smulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulls(cond, rdlo, rdhi, rn, rm); + } + void Smulls(Register rdlo, Register rdhi, Register rn, Register rm) { + Smulls(al, rdlo, rdhi, rn, rm); + } + + void Smultb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smultb(cond, rd, rn, rm); + } + void Smultb(Register rd, Register rn, Register rm) { Smultb(al, rd, rn, rm); } + + void Smultt(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smultt(cond, rd, rn, rm); + } + void Smultt(Register rd, Register rn, Register rm) { Smultt(al, rd, rn, rm); } + + void Smulwb(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulwb(cond, rd, rn, rm); + } + void Smulwb(Register rd, Register rn, Register rm) { Smulwb(al, rd, rn, rm); } + + void Smulwt(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smulwt(cond, rd, rn, rm); + } + void Smulwt(Register rd, Register rn, Register rm) { Smulwt(al, rd, rn, rm); } + + void Smusd(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smusd(cond, rd, rn, rm); + } + void Smusd(Register rd, Register rn, Register rm) { Smusd(al, rd, rn, rm); } + + void Smusdx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + smusdx(cond, rd, rn, rm); + } + void Smusdx(Register rd, Register rn, Register rm) { Smusdx(al, rd, rn, rm); } + + void Ssat(Condition cond, Register rd, uint32_t imm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssat(cond, rd, imm, operand); + } + void Ssat(Register rd, uint32_t imm, const Operand& operand) { + Ssat(al, rd, imm, operand); + } + + void Ssat16(Condition cond, Register rd, uint32_t imm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssat16(cond, rd, imm, rn); + } + void Ssat16(Register rd, uint32_t imm, Register rn) { + Ssat16(al, rd, imm, rn); + } + + void Ssax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssax(cond, rd, rn, rm); + } + void Ssax(Register rd, Register rn, Register rm) { Ssax(al, rd, rn, rm); } + + void Ssub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssub16(cond, rd, rn, rm); + } + void Ssub16(Register rd, Register rn, Register rm) { Ssub16(al, rd, rn, rm); } + + void Ssub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ssub8(cond, rd, rn, rm); + } + void Ssub8(Register rd, Register rn, Register rm) { Ssub8(al, rd, rn, rm); } + + void Stl(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stl(cond, rt, operand); + } + void Stl(Register rt, const MemOperand& operand) { Stl(al, rt, operand); } + + void Stlb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlb(cond, rt, operand); + } + void Stlb(Register rt, const MemOperand& operand) { Stlb(al, rt, operand); } + + void Stlex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlex(cond, rd, rt, operand); + } + void Stlex(Register rd, Register rt, const MemOperand& operand) { + Stlex(al, rd, rt, operand); + } + + void Stlexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + 
ITScope it_scope(this, &cond, guard); + stlexb(cond, rd, rt, operand); + } + void Stlexb(Register rd, Register rt, const MemOperand& operand) { + Stlexb(al, rd, rt, operand); + } + + void Stlexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlexd(cond, rd, rt, rt2, operand); + } + void Stlexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + Stlexd(al, rd, rt, rt2, operand); + } + + void Stlexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlexh(cond, rd, rt, operand); + } + void Stlexh(Register rd, Register rt, const MemOperand& operand) { + Stlexh(al, rd, rt, operand); + } + + void Stlh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stlh(cond, rt, operand); + } + void Stlh(Register rt, const MemOperand& operand) { Stlh(al, rt, operand); } + + void Stm(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stm(cond, rn, write_back, registers); + } + void Stm(Register rn, WriteBack write_back, RegisterList registers) { + Stm(al, rn, write_back, registers); + } + + void Stmda(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmda(cond, rn, write_back, registers); + } + void Stmda(Register rn, WriteBack write_back, RegisterList registers) { + Stmda(al, rn, write_back, registers); + } + + void Stmdb(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmdb(cond, rn, write_back, registers); + } + void Stmdb(Register rn, WriteBack write_back, RegisterList registers) { + Stmdb(al, rn, write_back, registers); + } + + void Stmea(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmea(cond, rn, write_back, registers); + } + void Stmea(Register rn, WriteBack write_back, RegisterList registers) { + Stmea(al, rn, write_back, registers); + } + + void Stmed(Condition cond, + Register 
rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmed(cond, rn, write_back, registers); + } + void Stmed(Register rn, WriteBack write_back, RegisterList registers) { + Stmed(al, rn, write_back, registers); + } + + void Stmfa(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmfa(cond, rn, write_back, registers); + } + void Stmfa(Register rn, WriteBack write_back, RegisterList registers) { + Stmfa(al, rn, write_back, registers); + } + + void Stmfd(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmfd(cond, rn, write_back, registers); + } + void Stmfd(Register rn, WriteBack write_back, RegisterList registers) { + Stmfd(al, rn, write_back, registers); + } + + void Stmib(Condition cond, + Register rn, + WriteBack write_back, + RegisterList registers) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(registers)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + stmib(cond, rn, write_back, registers); + } + void Stmib(Register rn, WriteBack 
write_back, RegisterList registers) { + Stmib(al, rn, write_back, registers); + } + + void Str(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // STR{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 124, 4) && + (operand.GetAddrMode() == Offset)) || + // STR{}{} , [SP{, #{+}}] ; T2 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsSP() && + operand.IsOffsetImmediateWithinRange(0, 1020, 4) && + (operand.GetAddrMode() == Offset)) || + // STR{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + str(cond, rt, operand); + } + void Str(Register rt, const MemOperand& operand) { Str(al, rt, operand); } + + void Strb(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // STRB{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 31) && + (operand.GetAddrMode() == Offset)) || + // STRB{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + strb(cond, 
rt, operand); + } + void Strb(Register rt, const MemOperand& operand) { Strb(al, rt, operand); } + + void Strd(Condition cond, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strd(cond, rt, rt2, operand); + } + void Strd(Register rt, Register rt2, const MemOperand& operand) { + Strd(al, rt, rt2, operand); + } + + void Strex(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strex(cond, rd, rt, operand); + } + void Strex(Register rd, Register rt, const MemOperand& operand) { + Strex(al, rd, rt, operand); + } + + void Strexb(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strexb(cond, rd, rt, operand); + } + void Strexb(Register rd, Register rt, const MemOperand& operand) { + Strexb(al, rd, rt, operand); + } + + void Strexd(Condition cond, + Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strexd(cond, rd, rt, rt2, operand); + } + void Strexd(Register rd, + Register rt, + Register rt2, + const MemOperand& operand) { + Strexd(al, rd, rt, rt2, operand); + } + + void Strexh(Condition cond, + Register rd, + Register rt, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + strexh(cond, rd, rt, operand); + } + void Strexh(Register rd, Register rt, const MemOperand& operand) { + Strexh(al, rd, rt, operand); + } + + void Strh(Condition cond, Register rt, const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // STRH{}{} , [ {, #{+}}] ; T1 + (operand.IsImmediate() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.IsOffsetImmediateWithinRange(0, 62, 2) && + (operand.GetAddrMode() == Offset)) || + // STRH{}{} , [, {+}] ; T1 + (operand.IsPlainRegister() && rt.IsLow() && + operand.GetBaseRegister().IsLow() && + operand.GetOffsetRegister().IsLow() && operand.GetSign().IsPlus() && + (operand.GetAddrMode() == Offset)); + ITScope it_scope(this, &cond, guard, can_use_it); + strh(cond, rt, operand); + } + void Strh(Register rt, const MemOperand& operand) { Strh(al, rt, operand); } + + void Sub(Condition cond, Register rd, Register rn, const Operand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + if (cond.Is(al) && rd.Is(rn) && operand.IsImmediate()) { + uint32_t immediate = operand.GetImmediate(); + if (immediate == 0) { + return; + } + } + bool can_use_it = + // SUB{} , , # ; T1 + (operand.IsImmediate() && (operand.GetImmediate() <= 7) && rn.IsLow() && + rd.IsLow()) || + // SUB{} {,} , # ; T2 + (operand.IsImmediate() && (operand.GetImmediate() <= 255) && + rd.IsLow() && rn.Is(rd)) || + // SUB{} , , + (operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + operand.GetBaseRegister().IsLow()); + ITScope it_scope(this, &cond, guard, can_use_it); + sub(cond, rd, rn, operand); + } + void Sub(Register rd, Register rn, const Operand& operand) { + Sub(al, rd, rn, operand); + } + void Sub(FlagsUpdate flags, + Condition cond, + Register rd, + Register rn, + const Operand& operand) { + switch (flags) { + case LeaveFlags: + Sub(cond, rd, rn, operand); + break; + case SetFlags: + Subs(cond, rd, rn, operand); + break; + case DontCare: + bool setflags_is_smaller = + IsUsingT32() && cond.Is(al) && + ((operand.IsPlainRegister() && rd.IsLow() && rn.IsLow() && + operand.GetBaseRegister().IsLow()) || + (operand.IsImmediate() && + ((rd.IsLow() && rn.IsLow() && (operand.GetImmediate() < 8)) || + (rd.IsLow() && rn.Is(rd) && (operand.GetImmediate() < 256))))); + if (setflags_is_smaller) { + Subs(cond, rd, rn, operand); + } else { + bool changed_op_is_smaller = + operand.IsImmediate() && (operand.GetSignedImmediate() < 0) && + ((rd.IsLow() && rn.IsLow() && + (operand.GetSignedImmediate() >= -7)) || + (rd.IsLow() && rn.Is(rd) && + (operand.GetSignedImmediate() >= -255))); + if (changed_op_is_smaller) { + Adds(cond, rd, rn, -operand.GetSignedImmediate()); + } else { + Sub(cond, rd, rn, operand); + } + } + 
break; + } + } + void Sub(FlagsUpdate flags, + Register rd, + Register rn, + const Operand& operand) { + Sub(flags, al, rd, rn, operand); + } + + void Subs(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + subs(cond, rd, rn, operand); + } + void Subs(Register rd, Register rn, const Operand& operand) { + Subs(al, rd, rn, operand); + } + + void Svc(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + svc(cond, imm); + } + void Svc(uint32_t imm) { Svc(al, imm); } + + void Sxtab(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtab(cond, rd, rn, operand); + } + void Sxtab(Register rd, Register rn, const Operand& operand) { + Sxtab(al, rd, rn, operand); + } + + void Sxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtab16(cond, rd, rn, operand); + } + void Sxtab16(Register rd, Register rn, const Operand& operand) { + 
Sxtab16(al, rd, rn, operand); + } + + void Sxtah(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtah(cond, rd, rn, operand); + } + void Sxtah(Register rd, Register rn, const Operand& operand) { + Sxtah(al, rd, rn, operand); + } + + void Sxtb(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtb(cond, rd, operand); + } + void Sxtb(Register rd, const Operand& operand) { Sxtb(al, rd, operand); } + + void Sxtb16(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxtb16(cond, rd, operand); + } + void Sxtb16(Register rd, const Operand& operand) { Sxtb16(al, rd, operand); } + + void Sxth(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + sxth(cond, rd, operand); + } + void Sxth(Register rd, const Operand& operand) { Sxth(al, rd, operand); } + + void Teq(Condition cond, Register rn, const Operand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + teq(cond, rn, operand); + } + void Teq(Register rn, const Operand& operand) { Teq(al, rn, operand); } + + void Tst(Condition cond, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + bool can_use_it = + // TST{}{} , ; T1 + operand.IsPlainRegister() && rn.IsLow() && + operand.GetBaseRegister().IsLow(); + ITScope it_scope(this, &cond, guard, can_use_it); + tst(cond, rn, operand); + } + void Tst(Register rn, const Operand& operand) { Tst(al, rn, operand); } + + void Uadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uadd16(cond, rd, rn, rm); + } + void Uadd16(Register rd, Register rn, Register rm) { Uadd16(al, rd, rn, rm); } + + void Uadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uadd8(cond, rd, rn, rm); + } + void Uadd8(Register rd, Register rn, Register rm) { Uadd8(al, rd, rn, rm); } + + void Uasx(Condition cond, Register rd, Register rn, Register 
rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uasx(cond, rd, rn, rm); + } + void Uasx(Register rd, Register rn, Register rm) { Uasx(al, rd, rn, rm); } + + void Ubfx( + Condition cond, Register rd, Register rn, uint32_t lsb, uint32_t width) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + ubfx(cond, rd, rn, lsb, width); + } + void Ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width) { + Ubfx(al, rd, rn, lsb, width); + } + + void Udf(Condition cond, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + udf(cond, imm); + } + void Udf(uint32_t imm) { Udf(al, imm); } + + void Udiv(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + udiv(cond, rd, rn, rm); + } + void Udiv(Register rd, Register rn, Register rm) { Udiv(al, rd, rn, rm); } + + void Uhadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhadd16(cond, rd, rn, rm); + } + void Uhadd16(Register rd, Register rn, Register rm) { + Uhadd16(al, rd, rn, rm); + } + + void Uhadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhadd8(cond, rd, rn, rm); + } + void Uhadd8(Register rd, Register rn, Register rm) { Uhadd8(al, rd, rn, rm); } + + void Uhasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhasx(cond, rd, rn, rm); + } + void Uhasx(Register rd, Register rn, Register rm) { Uhasx(al, rd, rn, rm); } + + void Uhsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhsax(cond, rd, rn, rm); + } + void Uhsax(Register rd, Register rn, Register rm) { Uhsax(al, rd, rn, rm); } + + void Uhsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhsub16(cond, rd, rn, rm); + } + void Uhsub16(Register rd, Register rn, Register rm) { + Uhsub16(al, rd, rn, rm); + } + + void Uhsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uhsub8(cond, rd, rn, rm); + } + void Uhsub8(Register rd, Register rn, Register rm) { Uhsub8(al, rd, rn, rm); } + + void Umaal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umaal(cond, rdlo, rdhi, rn, rm); + } + void Umaal(Register rdlo, Register rdhi, Register rn, Register rm) { + Umaal(al, rdlo, rdhi, rn, rm); + } + + void Umlal( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umlal(cond, rdlo, rdhi, rn, rm); + } + void Umlal(Register rdlo, Register rdhi, Register rn, Register rm) { + Umlal(al, rdlo, rdhi, rn, rm); + } + void Umlal(FlagsUpdate flags, + Condition cond, + Register rdlo, + Register rdhi, + 
Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Umlal(cond, rdlo, rdhi, rn, rm); + break; + case SetFlags: + Umlals(cond, rdlo, rdhi, rn, rm); + break; + case DontCare: + Umlal(cond, rdlo, rdhi, rn, rm); + break; + } + } + void Umlal(FlagsUpdate flags, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + Umlal(flags, al, rdlo, rdhi, rn, rm); + } + + void Umlals( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umlals(cond, rdlo, rdhi, rn, rm); + } + void Umlals(Register rdlo, Register rdhi, Register rn, Register rm) { + Umlals(al, rdlo, rdhi, rn, rm); + } + + void Umull( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umull(cond, rdlo, rdhi, rn, rm); + } + void Umull(Register rdlo, Register rdhi, Register rn, Register rm) { + Umull(al, rdlo, rdhi, rn, rm); + } + void Umull(FlagsUpdate flags, + Condition cond, + Register rdlo, + Register rdhi, + Register rn, + Register rm) { + switch (flags) { + case LeaveFlags: + Umull(cond, rdlo, rdhi, rn, rm); + break; + case SetFlags: + Umulls(cond, rdlo, rdhi, rn, rm); + break; + case DontCare: + Umull(cond, rdlo, rdhi, rn, rm); + break; + } + } + void Umull(FlagsUpdate flags, + Register rdlo, + Register 
rdhi, + Register rn, + Register rm) { + Umull(flags, al, rdlo, rdhi, rn, rm); + } + + void Umulls( + Condition cond, Register rdlo, Register rdhi, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdlo)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rdhi)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + umulls(cond, rdlo, rdhi, rn, rm); + } + void Umulls(Register rdlo, Register rdhi, Register rn, Register rm) { + Umulls(al, rdlo, rdhi, rn, rm); + } + + void Uqadd16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqadd16(cond, rd, rn, rm); + } + void Uqadd16(Register rd, Register rn, Register rm) { + Uqadd16(al, rd, rn, rm); + } + + void Uqadd8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqadd8(cond, rd, rn, rm); + } + void Uqadd8(Register rd, Register rn, Register rm) { Uqadd8(al, rd, rn, rm); } + + void Uqasx(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqasx(cond, rd, rn, rm); + } + void Uqasx(Register rd, Register rn, Register rm) { Uqasx(al, rd, rn, rm); } + + void Uqsax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqsax(cond, rd, rn, rm); + } + void Uqsax(Register rd, Register rn, Register rm) { Uqsax(al, rd, rn, rm); } + + void Uqsub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqsub16(cond, rd, rn, rm); + } + void Uqsub16(Register rd, Register rn, Register rm) { + Uqsub16(al, rd, rn, rm); + } + + void Uqsub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uqsub8(cond, rd, rn, rm); + } + void Uqsub8(Register rd, Register rn, Register rm) { Uqsub8(al, rd, rn, rm); } + + void Usad8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usad8(cond, rd, rn, rm); + } + void Usad8(Register rd, Register rn, Register rm) { Usad8(al, rd, rn, rm); } + + void Usada8( + Condition cond, Register rd, Register rn, Register rm, Register ra) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(ra)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usada8(cond, rd, rn, rm, ra); + } + void Usada8(Register rd, Register rn, Register rm, Register ra) { + Usada8(al, rd, rn, rm, ra); + } + + void Usat(Condition cond, Register rd, uint32_t imm, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usat(cond, rd, imm, operand); + } + void Usat(Register rd, uint32_t imm, const Operand& operand) { + Usat(al, rd, imm, operand); + } + + void Usat16(Condition cond, Register rd, uint32_t imm, Register rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usat16(cond, rd, imm, rn); + } + void Usat16(Register rd, uint32_t imm, Register rn) { + Usat16(al, rd, imm, rn); + } + + void Usax(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usax(cond, rd, rn, rm); + } + void Usax(Register rd, Register rn, Register rm) { Usax(al, rd, rn, rm); } + + void Usub16(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usub16(cond, rd, rn, rm); + } + void Usub16(Register rd, Register rn, Register rm) { Usub16(al, rd, rn, rm); } + + void Usub8(Condition cond, Register rd, Register rn, Register rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + usub8(cond, rd, rn, rm); + } + void Usub8(Register rd, Register rn, Register rm) { Usub8(al, rd, rn, rm); } + + void Uxtab(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtab(cond, rd, rn, operand); + } + void Uxtab(Register rd, Register rn, const Operand& operand) { + Uxtab(al, rd, rn, operand); + } + + void Uxtab16(Condition cond, + Register rd, + Register rn, + const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtab16(cond, rd, rn, operand); + } + void Uxtab16(Register rd, Register rn, const Operand& operand) { + Uxtab16(al, rd, rn, operand); + } + + void Uxtah(Condition cond, Register rd, Register rn, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtah(cond, rd, rn, operand); + } + void Uxtah(Register rd, Register rn, const Operand& operand) { + Uxtah(al, rd, rn, operand); + } + + void Uxtb(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtb(cond, rd, operand); + } + void Uxtb(Register rd, const Operand& operand) { Uxtb(al, rd, operand); } + + void Uxtb16(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxtb16(cond, rd, operand); + } + void Uxtb16(Register rd, const Operand& operand) { Uxtb16(al, rd, operand); } + + void Uxth(Condition cond, Register rd, const Operand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); 
+ VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + uxth(cond, rd, operand); + } + void Uxth(Register rd, const Operand& operand) { Uxth(al, rd, operand); } + + void Vaba( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaba(cond, dt, rd, rn, rm); + } + void Vaba(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vaba(al, dt, rd, rn, rm); + } + + void Vaba( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaba(cond, dt, rd, rn, rm); + } + void Vaba(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vaba(al, dt, rd, rn, rm); + } + + void Vabal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabal(cond, dt, rd, rn, rm); + } + void Vabal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vabal(al, dt, rd, rn, rm); + } + + void Vabd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabd(cond, dt, rd, rn, rm); + } + void Vabd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vabd(al, dt, rd, rn, rm); + } + + void Vabd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabd(cond, dt, rd, rn, rm); + } + void Vabd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vabd(al, dt, rd, rn, rm); + } + + void Vabdl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabdl(cond, dt, rd, rn, rm); + } + void Vabdl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vabdl(al, dt, rd, rn, rm); + } + + void Vabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabs(cond, dt, rd, rm); + } + void Vabs(DataType dt, DRegister rd, DRegister rm) { Vabs(al, dt, rd, rm); } + + void Vabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabs(cond, dt, rd, rm); + } + void Vabs(DataType dt, QRegister rd, QRegister rm) { Vabs(al, dt, rd, rm); } + + void Vabs(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vabs(cond, dt, rd, rm); + } + void Vabs(DataType dt, SRegister rd, SRegister rm) { Vabs(al, dt, rd, rm); } + + void Vacge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacge(cond, dt, rd, rn, rm); + } + void Vacge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vacge(al, dt, rd, rn, rm); + } + + void Vacge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacge(cond, dt, rd, rn, rm); + } + void Vacge(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vacge(al, dt, rd, rn, rm); + } + + void Vacgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacgt(cond, dt, rd, rn, rm); + } + void Vacgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vacgt(al, dt, rd, rn, rm); + } + + void Vacgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacgt(cond, dt, rd, rn, rm); + } + void Vacgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vacgt(al, dt, rd, rn, rm); + } + + void Vacle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacle(cond, dt, rd, rn, rm); + } + void Vacle(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vacle(al, dt, rd, rn, rm); + } + + void Vacle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vacle(cond, dt, rd, rn, rm); + } + void Vacle(DataType dt, 
QRegister rd, QRegister rn, QRegister rm) { + Vacle(al, dt, rd, rn, rm); + } + + void Vaclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaclt(cond, dt, rd, rn, rm); + } + void Vaclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vaclt(al, dt, rd, rn, rm); + } + + void Vaclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaclt(cond, dt, rd, rn, rm); + } + void Vaclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vaclt(al, dt, rd, rn, rm); + } + + void Vadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vadd(cond, dt, rd, rn, rm); + } + void Vadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vadd(al, dt, rd, rn, rm); + } + + void Vadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vadd(cond, dt, rd, rn, rm); + } + void Vadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vadd(al, dt, rd, rn, rm); + } + + void Vadd( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vadd(cond, dt, rd, rn, rm); + } + void Vadd(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vadd(al, dt, rd, rn, rm); + } + + void Vaddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaddhn(cond, dt, rd, rn, rm); + } + void Vaddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + Vaddhn(al, dt, rd, rn, rm); + } + + void Vaddl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaddl(cond, dt, rd, rn, rm); + } + void Vaddl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vaddl(al, dt, rd, rn, rm); + } + + void Vaddw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vaddw(cond, dt, rd, rn, rm); + } + void Vaddw(DataType dt, QRegister rd, QRegister rn, DRegister rm) { + Vaddw(al, dt, rd, rn, rm); + } + + void Vand(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vand(cond, dt, rd, rn, operand); + } + void Vand(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + Vand(al, dt, rd, rn, operand); + } + + void Vand(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vand(cond, dt, rd, rn, operand); + } + void Vand(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + Vand(al, dt, rd, rn, operand); + } + + void Vbic(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + 
ITScope it_scope(this, &cond, guard); + vbic(cond, dt, rd, rn, operand); + } + void Vbic(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + Vbic(al, dt, rd, rn, operand); + } + + void Vbic(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vbic(cond, dt, rd, rn, operand); + } + void Vbic(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + Vbic(al, dt, rd, rn, operand); + } + + void Vbif( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vbif(cond, dt, rd, rn, rm); + } + void Vbif(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vbif(al, dt, rd, rn, rm); + } + void Vbif(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + Vbif(cond, kDataTypeValueNone, rd, rn, rm); + } + void Vbif(DRegister rd, DRegister rn, DRegister rm) { + Vbif(al, kDataTypeValueNone, rd, rn, rm); + } + + void Vbif( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vbif(cond, dt, rd, rn, rm); + } + void Vbif(DataType dt, 
QRegister rd, QRegister rn, QRegister rm) { + Vbif(al, dt, rd, rn, rm); + } + void Vbif(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + Vbif(cond, kDataTypeValueNone, rd, rn, rm); + } + void Vbif(QRegister rd, QRegister rn, QRegister rm) { + Vbif(al, kDataTypeValueNone, rd, rn, rm); + } + + void Vbit( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vbit(cond, dt, rd, rn, rm); + } + void Vbit(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vbit(al, dt, rd, rn, rm); + } + void Vbit(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + Vbit(cond, kDataTypeValueNone, rd, rn, rm); + } + void Vbit(DRegister rd, DRegister rn, DRegister rm) { + Vbit(al, kDataTypeValueNone, rd, rn, rm); + } + + void Vbit( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vbit(cond, dt, rd, rn, rm); + } + void Vbit(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vbit(al, dt, rd, rn, rm); + } + void Vbit(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + Vbit(cond, kDataTypeValueNone, rd, rn, rm); + } + void Vbit(QRegister rd, QRegister rn, QRegister rm) { + Vbit(al, kDataTypeValueNone, rd, rn, rm); + } + + void Vbsl( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vbsl(cond, dt, rd, rn, rm); + } + void Vbsl(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vbsl(al, dt, rd, rn, rm); + } + void Vbsl(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + Vbsl(cond, kDataTypeValueNone, rd, rn, rm); + } + void Vbsl(DRegister rd, DRegister rn, DRegister rm) { + Vbsl(al, kDataTypeValueNone, rd, rn, rm); + } + + void Vbsl( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vbsl(cond, dt, rd, rn, rm); + } + void Vbsl(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vbsl(al, dt, rd, rn, rm); + } + void Vbsl(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + Vbsl(cond, kDataTypeValueNone, rd, rn, rm); + } + void Vbsl(QRegister rd, QRegister rn, QRegister rm) { + Vbsl(al, kDataTypeValueNone, rd, rn, rm); + } + + void Vceq(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vceq(cond, dt, rd, rm, operand); + } + void Vceq(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vceq(al, dt, rd, rm, operand); + } + + void Vceq(Condition cond, + 
DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vceq(cond, dt, rd, rm, operand); + } + void Vceq(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vceq(al, dt, rd, rm, operand); + } + + void Vceq( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vceq(cond, dt, rd, rn, rm); + } + void Vceq(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vceq(al, dt, rd, rn, rm); + } + + void Vceq( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vceq(cond, dt, rd, rn, rm); + } + void Vceq(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vceq(al, dt, rd, rn, rm); + } + + void Vcge(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope 
guard(this); + ITScope it_scope(this, &cond, guard); + vcge(cond, dt, rd, rm, operand); + } + void Vcge(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vcge(al, dt, rd, rm, operand); + } + + void Vcge(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcge(cond, dt, rd, rm, operand); + } + void Vcge(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vcge(al, dt, rd, rm, operand); + } + + void Vcge( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcge(cond, dt, rd, rn, rm); + } + void Vcge(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vcge(al, dt, rd, rn, rm); + } + + void Vcge( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcge(cond, dt, rd, rn, rm); + } + void Vcge(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vcge(al, dt, rd, rn, rm); + } + + void Vcgt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcgt(cond, dt, rd, rm, operand); + } + void Vcgt(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vcgt(al, dt, rd, rm, operand); + } + + void Vcgt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcgt(cond, dt, rd, rm, operand); + } + void Vcgt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vcgt(al, dt, rd, rm, operand); + } + + void Vcgt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcgt(cond, dt, rd, rn, rm); + } + void Vcgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vcgt(al, dt, rd, rn, rm); + } + + void Vcgt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + 
vcgt(cond, dt, rd, rn, rm); + } + void Vcgt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vcgt(al, dt, rd, rn, rm); + } + + void Vcle(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcle(cond, dt, rd, rm, operand); + } + void Vcle(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vcle(al, dt, rd, rm, operand); + } + + void Vcle(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcle(cond, dt, rd, rm, operand); + } + void Vcle(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vcle(al, dt, rd, rm, operand); + } + + void Vcle( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcle(cond, dt, rd, rn, rm); + } + void Vcle(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vcle(al, dt, rd, rn, rm); + } + + void Vcle( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcle(cond, dt, rd, rn, rm); + } + void Vcle(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vcle(al, dt, rd, rn, rm); + } + + void Vcls(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcls(cond, dt, rd, rm); + } + void Vcls(DataType dt, DRegister rd, DRegister rm) { Vcls(al, dt, rd, rm); } + + void Vcls(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcls(cond, dt, rd, rm); + } + void Vcls(DataType dt, QRegister rd, QRegister rm) { Vcls(al, dt, rd, rm); } + + void Vclt(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vclt(cond, dt, rd, rm, operand); + } + void Vclt(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vclt(al, dt, rd, rm, operand); + } + + void Vclt(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vclt(cond, dt, rd, rm, operand); + } + void Vclt(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vclt(al, dt, rd, rm, operand); + } + + void Vclt( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vclt(cond, dt, rd, rn, rm); + } + void Vclt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vclt(al, dt, rd, rn, rm); + } + + void Vclt( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vclt(cond, dt, rd, rn, rm); + } + void Vclt(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vclt(al, dt, rd, rn, rm); + } + + void Vclz(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vclz(cond, dt, rd, rm); + } + void Vclz(DataType dt, DRegister rd, DRegister rm) { Vclz(al, dt, rd, rm); } + + void 
Vclz(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vclz(cond, dt, rd, rm); + } + void Vclz(DataType dt, QRegister rd, QRegister rm) { Vclz(al, dt, rd, rm); } + + void Vcmp(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcmp(cond, dt, rd, operand); + } + void Vcmp(DataType dt, SRegister rd, const SOperand& operand) { + Vcmp(al, dt, rd, operand); + } + + void Vcmp(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcmp(cond, dt, rd, operand); + } + void Vcmp(DataType dt, DRegister rd, const DOperand& operand) { + Vcmp(al, dt, rd, operand); + } + + void Vcmpe(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcmpe(cond, dt, rd, operand); + } + void Vcmpe(DataType dt, SRegister rd, const SOperand& operand) { + Vcmpe(al, dt, rd, operand); + } + + void Vcmpe(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcmpe(cond, dt, rd, operand); + } + void Vcmpe(DataType dt, DRegister rd, const DOperand& operand) { + Vcmpe(al, dt, rd, operand); + } + + void Vcnt(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcnt(cond, dt, rd, rm); + } + void Vcnt(DataType dt, DRegister rd, DRegister rm) { Vcnt(al, dt, rd, rm); } + + void Vcnt(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcnt(cond, dt, rd, rm); + } + void Vcnt(DataType dt, QRegister rd, QRegister rm) { Vcnt(al, dt, rd, rm); } + + void Vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm); + } + void Vcvt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + Vcvt(al, dt1, dt2, rd, rm); + } + + void Vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm); + } + void Vcvt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + Vcvt(al, dt1, dt2, rd, rm); + } + + void Vcvt(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm, + int32_t fbits) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm, fbits); + } + void Vcvt( + DataType dt1, DataType dt2, DRegister rd, DRegister rm, int32_t fbits) { + Vcvt(al, dt1, dt2, rd, rm, fbits); + } + + void Vcvt(Condition cond, + DataType dt1, + DataType dt2, + QRegister rd, + QRegister rm, + int32_t fbits) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm, fbits); + } + void Vcvt( + DataType dt1, DataType dt2, QRegister rd, QRegister rm, int32_t fbits) { + Vcvt(al, dt1, dt2, rd, rm, fbits); + } + + void Vcvt(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm, + int32_t fbits) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm, fbits); + } + void Vcvt( + DataType dt1, DataType dt2, SRegister rd, SRegister rm, int32_t fbits) { + Vcvt(al, dt1, dt2, rd, rm, fbits); + } + + void Vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister 
rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm); + } + void Vcvt(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + Vcvt(al, dt1, dt2, rd, rm); + } + + void Vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm); + } + void Vcvt(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + Vcvt(al, dt1, dt2, rd, rm); + } + + void Vcvt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm); + } + void Vcvt(DataType dt1, DataType dt2, DRegister rd, QRegister rm) { + Vcvt(al, dt1, dt2, rd, rm); + } + + void Vcvt( + Condition cond, DataType dt1, DataType dt2, QRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm); + } + void Vcvt(DataType dt1, DataType dt2, QRegister rd, DRegister rm) { + Vcvt(al, dt1, dt2, rd, rm); + } + + void Vcvt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvt(cond, dt1, dt2, rd, rm); + } + void Vcvt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + Vcvt(al, dt1, dt2, rd, rm); + } + + void Vcvta(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvta(dt1, dt2, rd, rm); + } + + void Vcvta(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvta(dt1, dt2, rd, rm); + } + + void Vcvta(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvta(dt1, dt2, rd, rm); + } + + void Vcvta(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvta(dt1, dt2, rd, rm); + } + + void Vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope 
guard(this); + ITScope it_scope(this, &cond, guard); + vcvtb(cond, dt1, dt2, rd, rm); + } + void Vcvtb(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + Vcvtb(al, dt1, dt2, rd, rm); + } + + void Vcvtb( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvtb(cond, dt1, dt2, rd, rm); + } + void Vcvtb(DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + Vcvtb(al, dt1, dt2, rd, rm); + } + + void Vcvtb( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvtb(cond, dt1, dt2, rd, rm); + } + void Vcvtb(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + Vcvtb(al, dt1, dt2, rd, rm); + } + + void Vcvtm(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtm(dt1, dt2, rd, rm); + } + + void Vcvtm(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtm(dt1, dt2, rd, rm); + } + + void Vcvtm(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtm(dt1, dt2, rd, rm); + } + + void Vcvtm(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtm(dt1, dt2, rd, rm); + } + + void Vcvtn(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtn(dt1, dt2, rd, rm); + } + + void Vcvtn(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtn(dt1, dt2, rd, rm); + } + + void Vcvtn(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtn(dt1, dt2, rd, rm); + } + + void Vcvtn(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtn(dt1, dt2, rd, rm); + } + + void Vcvtp(DataType dt1, DataType dt2, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtp(dt1, dt2, rd, rm); + } + + void Vcvtp(DataType dt1, DataType dt2, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtp(dt1, dt2, rd, rm); + } + + void Vcvtp(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtp(dt1, dt2, rd, rm); + } + + void Vcvtp(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vcvtp(dt1, dt2, rd, rm); + } + + void Vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvtr(cond, dt1, dt2, rd, rm); + } + void Vcvtr(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + Vcvtr(al, dt1, dt2, rd, rm); + } + + void Vcvtr( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvtr(cond, dt1, dt2, rd, rm); + } 
+ void Vcvtr(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + Vcvtr(al, dt1, dt2, rd, rm); + } + + void Vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvtt(cond, dt1, dt2, rd, rm); + } + void Vcvtt(DataType dt1, DataType dt2, SRegister rd, SRegister rm) { + Vcvtt(al, dt1, dt2, rd, rm); + } + + void Vcvtt( + Condition cond, DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvtt(cond, dt1, dt2, rd, rm); + } + void Vcvtt(DataType dt1, DataType dt2, DRegister rd, SRegister rm) { + Vcvtt(al, dt1, dt2, rd, rm); + } + + void Vcvtt( + Condition cond, DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vcvtt(cond, dt1, dt2, rd, rm); + } + void Vcvtt(DataType dt1, DataType dt2, SRegister rd, DRegister rm) { + Vcvtt(al, dt1, dt2, rd, rm); + } + + void Vdiv( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + 
vdiv(cond, dt, rd, rn, rm); + } + void Vdiv(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vdiv(al, dt, rd, rn, rm); + } + + void Vdiv( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vdiv(cond, dt, rd, rn, rm); + } + void Vdiv(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vdiv(al, dt, rd, rn, rm); + } + + void Vdup(Condition cond, DataType dt, QRegister rd, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vdup(cond, dt, rd, rt); + } + void Vdup(DataType dt, QRegister rd, Register rt) { Vdup(al, dt, rd, rt); } + + void Vdup(Condition cond, DataType dt, DRegister rd, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vdup(cond, dt, rd, rt); + } + void Vdup(DataType dt, DRegister rd, Register rt) { Vdup(al, dt, rd, rt); } + + void Vdup(Condition cond, DataType dt, DRegister rd, DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vdup(cond, dt, rd, rm); + } + void Vdup(DataType dt, DRegister rd, DRegisterLane rm) { + Vdup(al, dt, rd, 
rm); + } + + void Vdup(Condition cond, DataType dt, QRegister rd, DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vdup(cond, dt, rd, rm); + } + void Vdup(DataType dt, QRegister rd, DRegisterLane rm) { + Vdup(al, dt, rd, rm); + } + + void Veor( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + veor(cond, dt, rd, rn, rm); + } + void Veor(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Veor(al, dt, rd, rn, rm); + } + void Veor(Condition cond, DRegister rd, DRegister rn, DRegister rm) { + Veor(cond, kDataTypeValueNone, rd, rn, rm); + } + void Veor(DRegister rd, DRegister rn, DRegister rm) { + Veor(al, kDataTypeValueNone, rd, rn, rm); + } + + void Veor( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + veor(cond, dt, rd, rn, rm); + } + void Veor(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Veor(al, dt, rd, rn, rm); + } + void Veor(Condition cond, QRegister rd, QRegister rn, QRegister rm) { + Veor(cond, kDataTypeValueNone, rd, rn, rm); + } + void Veor(QRegister rd, QRegister rn, QRegister rm) { + Veor(al, kDataTypeValueNone, rd, rn, rm); + } + + void 
Vext(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vext(cond, dt, rd, rn, rm, operand); + } + void Vext(DataType dt, + DRegister rd, + DRegister rn, + DRegister rm, + const DOperand& operand) { + Vext(al, dt, rd, rn, rm, operand); + } + + void Vext(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vext(cond, dt, rd, rn, rm, operand); + } + void Vext(DataType dt, + QRegister rd, + QRegister rn, + QRegister rm, + const QOperand& operand) { + Vext(al, dt, rd, rn, rm, operand); + } + + void Vfma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfma(cond, dt, rd, rn, rm); + } + void Vfma(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vfma(al, dt, rd, rn, rm); + } + + void Vfma( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfma(cond, dt, rd, rn, rm); + } + void Vfma(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vfma(al, dt, rd, rn, rm); + } + + void Vfma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfma(cond, dt, rd, rn, rm); + } + void Vfma(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vfma(al, dt, rd, rn, rm); + } + + void Vfms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfms(cond, dt, rd, rn, rm); + } + void Vfms(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vfms(al, dt, rd, rn, rm); + } + + void Vfms( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfms(cond, dt, rd, rn, rm); + } + void Vfms(DataType dt, QRegister rd, 
QRegister rn, QRegister rm) { + Vfms(al, dt, rd, rn, rm); + } + + void Vfms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfms(cond, dt, rd, rn, rm); + } + void Vfms(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vfms(al, dt, rd, rn, rm); + } + + void Vfnma( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfnma(cond, dt, rd, rn, rm); + } + void Vfnma(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vfnma(al, dt, rd, rn, rm); + } + + void Vfnma( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfnma(cond, dt, rd, rn, rm); + } + void Vfnma(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vfnma(al, dt, rd, rn, rm); + } + + void Vfnms( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfnms(cond, dt, rd, rn, rm); + } + void Vfnms(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vfnms(al, dt, rd, rn, rm); + } + + void Vfnms( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vfnms(cond, dt, rd, rn, rm); + } + void Vfnms(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vfnms(al, dt, rd, rn, rm); + } + + void Vhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vhadd(cond, dt, rd, rn, rm); + } + void Vhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vhadd(al, dt, rd, rn, rm); + } + + void Vhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vhadd(cond, dt, rd, rn, rm); + } + void Vhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vhadd(al, dt, rd, rn, rm); + } + + void Vhsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vhsub(cond, dt, rd, rn, rm); + } + void Vhsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vhsub(al, dt, rd, rn, rm); + } + + void Vhsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vhsub(cond, dt, rd, rn, rm); + } + void Vhsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vhsub(al, dt, rd, rn, rm); + } + + void Vld1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vld1(cond, dt, nreglist, operand); + } + void Vld1(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vld1(al, dt, nreglist, operand); + } + + void Vld2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vld2(cond, dt, nreglist, operand); + } + void 
Vld2(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vld2(al, dt, nreglist, operand); + } + + void Vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vld3(cond, dt, nreglist, operand); + } + void Vld3(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vld3(al, dt, nreglist, operand); + } + + void Vld3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vld3(cond, dt, nreglist, operand); + } + void Vld3(DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + Vld3(al, dt, nreglist, operand); + } + + void Vld4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vld4(cond, dt, nreglist, operand); + } + void Vld4(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vld4(al, dt, nreglist, operand); + } + + void Vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldm(cond, dt, rn, write_back, dreglist); + } + void Vldm(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vldm(al, dt, rn, write_back, dreglist); + } + void Vldm(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vldm(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vldm(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vldm(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vldm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldm(cond, dt, rn, write_back, sreglist); + } + void Vldm(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldm(al, dt, rn, write_back, sreglist); + } + void Vldm(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldm(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vldm(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vldm(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldmdb(cond, dt, rn, write_back, dreglist); + } + void 
Vldmdb(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vldmdb(al, dt, rn, write_back, dreglist); + } + void Vldmdb(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vldmdb(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vldmdb(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vldmdb(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vldmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldmdb(cond, dt, rn, write_back, sreglist); + } + void Vldmdb(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldmdb(al, dt, rn, write_back, sreglist); + } + void Vldmdb(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldmdb(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vldmdb(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vldmdb(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldmia(cond, dt, rn, write_back, dreglist); + } + void Vldmia(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vldmia(al, dt, rn, write_back, dreglist); + } + void Vldmia(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vldmia(cond, 
kDataTypeValueNone, rn, write_back, dreglist); + } + void Vldmia(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vldmia(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vldmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldmia(cond, dt, rn, write_back, sreglist); + } + void Vldmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldmia(al, dt, rn, write_back, sreglist); + } + void Vldmia(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vldmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vldmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vldmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + + void Vldr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldr(cond, dt, rd, operand); + } + void Vldr(DataType dt, DRegister rd, const MemOperand& operand) { + Vldr(al, dt, rd, operand); + } + void Vldr(Condition cond, DRegister rd, const MemOperand& operand) { + Vldr(cond, Untyped64, rd, operand); + } + void Vldr(DRegister rd, const MemOperand& operand) { + Vldr(al, Untyped64, rd, operand); + } + + + void Vldr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vldr(cond, dt, rd, operand); + } + void Vldr(DataType dt, SRegister rd, const MemOperand& operand) { + Vldr(al, dt, rd, operand); + } + void Vldr(Condition cond, SRegister rd, const MemOperand& operand) { + Vldr(cond, Untyped32, rd, operand); + } + void Vldr(SRegister rd, const MemOperand& operand) { + Vldr(al, Untyped32, rd, operand); + } + + void Vmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmax(cond, dt, rd, rn, rm); + } + void Vmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmax(al, dt, rd, rn, rm); + } + + void Vmax( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmax(cond, dt, rd, rn, rm); + } + void Vmax(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmax(al, dt, rd, rn, rm); + } + + void Vmaxnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vmaxnm(dt, rd, rn, rm); + } + + void Vmaxnm(DataType dt, QRegister rd, 
QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vmaxnm(dt, rd, rn, rm); + } + + void Vmaxnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vmaxnm(dt, rd, rn, rm); + } + + void Vmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmin(cond, dt, rd, rn, rm); + } + void Vmin(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmin(al, dt, rd, rn, rm); + } + + void Vmin( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmin(cond, dt, rd, rn, rm); + } + void Vmin(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmin(al, dt, rd, rn, rm); + } + + void Vminnm(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vminnm(dt, rd, rn, rm); + } + + void Vminnm(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vminnm(dt, rd, rn, rm); + } + + void Vminnm(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vminnm(dt, rd, rn, rm); + } + + void Vmla(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, QRegister rd, QRegister rn, 
DRegisterLane rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmla(cond, dt, rd, rn, rm); + } + void Vmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vmla(al, dt, rd, rn, rm); + } + + void Vmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); 
+ MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlal(cond, dt, rd, rn, rm); + } + void Vmlal(DataType dt, QRegister rd, DRegister rn, DRegisterLane rm) { + Vmlal(al, dt, rd, rn, rm); + } + + void Vmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlal(cond, dt, rd, rn, rm); + } + void Vmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vmlal(al, dt, rd, rn, rm); + } + + void Vmls(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmls(cond, dt, rd, rn, rm); + } + void Vmls(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vmls(al, dt, rd, rn, rm); + } + + void Vmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlsl(cond, dt, rd, rn, rm); + } + void Vmlsl(DataType dt, 
QRegister rd, DRegister rn, DRegisterLane rm) { + Vmlsl(al, dt, rd, rn, rm); + } + + void Vmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmlsl(cond, dt, rd, rn, rm); + } + void Vmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vmlsl(al, dt, rd, rn, rm); + } + + void Vmov(Condition cond, Register rt, SRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rt, rn); + } + void Vmov(Register rt, SRegister rn) { Vmov(al, rt, rn); } + + void Vmov(Condition cond, SRegister rn, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rn, rt); + } + void Vmov(SRegister rn, Register rt) { Vmov(al, rn, rt); } + + void Vmov(Condition cond, Register rt, Register rt2, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rt, rt2, rm); + } + void Vmov(Register rt, Register rt2, DRegister rm) { Vmov(al, rt, rt2, rm); } + + void Vmov(Condition cond, DRegister rm, Register rt, Register 
rt2) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rm, rt, rt2); + } + void Vmov(DRegister rm, Register rt, Register rt2) { Vmov(al, rm, rt, rt2); } + + void Vmov( + Condition cond, Register rt, Register rt2, SRegister rm, SRegister rm1) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm1)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rt, rt2, rm, rm1); + } + void Vmov(Register rt, Register rt2, SRegister rm, SRegister rm1) { + Vmov(al, rt, rt2, rm, rm1); + } + + void Vmov( + Condition cond, SRegister rm, SRegister rm1, Register rt, Register rt2) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm1)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, rm, rm1, rt, rt2); + } + void Vmov(SRegister rm, SRegister rm1, Register rt, Register rt2) { + Vmov(al, rm, rm1, rt, rt2); + } + + void Vmov(Condition cond, DataType dt, DRegisterLane rd, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, rt); + } + 
void Vmov(DataType dt, DRegisterLane rd, Register rt) { + Vmov(al, dt, rd, rt); + } + void Vmov(Condition cond, DRegisterLane rd, Register rt) { + Vmov(cond, kDataTypeValueNone, rd, rt); + } + void Vmov(DRegisterLane rd, Register rt) { + Vmov(al, kDataTypeValueNone, rd, rt); + } + + void Vmov(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, operand); + } + void Vmov(DataType dt, DRegister rd, const DOperand& operand) { + Vmov(al, dt, rd, operand); + } + + void Vmov(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, operand); + } + void Vmov(DataType dt, QRegister rd, const QOperand& operand) { + Vmov(al, dt, rd, operand); + } + + void Vmov(Condition cond, + DataType dt, + SRegister rd, + const SOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rd, operand); + } + void Vmov(DataType dt, SRegister rd, const SOperand& operand) { + Vmov(al, dt, rd, operand); + } + + void Vmov(Condition cond, DataType dt, Register rt, DRegisterLane rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmov(cond, dt, rt, rn); + } + void Vmov(DataType dt, Register rt, DRegisterLane rn) { + Vmov(al, dt, rt, rn); + } + void Vmov(Condition cond, Register rt, DRegisterLane rn) { + Vmov(cond, kDataTypeValueNone, rt, rn); + } + void Vmov(Register rt, DRegisterLane rn) { + Vmov(al, kDataTypeValueNone, rt, rn); + } + + void Vmovl(Condition cond, DataType dt, QRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmovl(cond, dt, rd, rm); + } + void Vmovl(DataType dt, QRegister rd, DRegister rm) { Vmovl(al, dt, rd, rm); } + + void Vmovn(Condition cond, DataType dt, DRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmovn(cond, dt, rd, rm); + } + void Vmovn(DataType dt, DRegister rd, QRegister rm) { Vmovn(al, dt, rd, rm); } + + void Vmrs(Condition cond, + RegisterOrAPSR_nzcv rt, + SpecialFPRegister spec_reg) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmrs(cond, rt, spec_reg); + } + void Vmrs(RegisterOrAPSR_nzcv rt, SpecialFPRegister spec_reg) { + Vmrs(al, rt, spec_reg); + } + + void Vmsr(Condition cond, SpecialFPRegister spec_reg, Register rt) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rt)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope 
it_scope(this, &cond, guard); + vmsr(cond, spec_reg, rt); + } + void Vmsr(SpecialFPRegister spec_reg, Register rt) { Vmsr(al, spec_reg, rt); } + + void Vmul(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, dm, index); + } + void Vmul( + DataType dt, DRegister rd, DRegister rn, DRegister dm, unsigned index) { + Vmul(al, dt, rd, rn, dm, index); + } + + void Vmul(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, dm, index); + } + void Vmul( + DataType dt, QRegister rd, QRegister rn, DRegister dm, unsigned index) { + Vmul(al, dt, rd, rn, dm, index); + } + + void Vmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, rm); + } + void Vmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vmul(al, dt, rd, rn, rm); + } + + void Vmul( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, rm); + } + void Vmul(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vmul(al, dt, rd, rn, rm); + } + + void Vmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmul(cond, dt, rd, rn, rm); + } + void Vmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vmul(al, dt, rd, rn, rm); + } + + void Vmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmull(cond, dt, rd, rn, dm, index); + } + void Vmull( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + Vmull(al, dt, rd, rn, dm, index); + } + + void Vmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmull(cond, 
dt, rd, rn, rm); + } + void Vmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vmull(al, dt, rd, rn, rm); + } + + void Vmvn(Condition cond, + DataType dt, + DRegister rd, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmvn(cond, dt, rd, operand); + } + void Vmvn(DataType dt, DRegister rd, const DOperand& operand) { + Vmvn(al, dt, rd, operand); + } + + void Vmvn(Condition cond, + DataType dt, + QRegister rd, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vmvn(cond, dt, rd, operand); + } + void Vmvn(DataType dt, QRegister rd, const QOperand& operand) { + Vmvn(al, dt, rd, operand); + } + + void Vneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vneg(cond, dt, rd, rm); + } + void Vneg(DataType dt, DRegister rd, DRegister rm) { Vneg(al, dt, rd, rm); } + + void Vneg(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vneg(cond, dt, rd, rm); + } + void Vneg(DataType dt, QRegister rd, QRegister rm) { Vneg(al, dt, rd, rm); } + + void 
Vneg(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vneg(cond, dt, rd, rm); + } + void Vneg(DataType dt, SRegister rd, SRegister rm) { Vneg(al, dt, rd, rm); } + + void Vnmla( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmla(cond, dt, rd, rn, rm); + } + void Vnmla(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vnmla(al, dt, rd, rn, rm); + } + + void Vnmla( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmla(cond, dt, rd, rn, rm); + } + void Vnmla(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vnmla(al, dt, rd, rn, rm); + } + + void Vnmls( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmls(cond, dt, rd, rn, rm); + } + void Vnmls(DataType dt, SRegister rd, 
SRegister rn, SRegister rm) { + Vnmls(al, dt, rd, rn, rm); + } + + void Vnmls( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmls(cond, dt, rd, rn, rm); + } + void Vnmls(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vnmls(al, dt, rd, rn, rm); + } + + void Vnmul( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmul(cond, dt, rd, rn, rm); + } + void Vnmul(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vnmul(al, dt, rd, rn, rm); + } + + void Vnmul( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vnmul(cond, dt, rd, rn, rm); + } + void Vnmul(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vnmul(al, dt, rd, rn, rm); + } + + void Vorn(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorn(cond, dt, rd, rn, operand); + } + void Vorn(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + Vorn(al, dt, rd, rn, operand); + } + + void Vorn(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorn(cond, dt, rd, rn, operand); + } + void Vorn(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + Vorn(al, dt, rd, rn, operand); + } + + void Vorr(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorr(cond, dt, rd, rn, operand); + } + void Vorr(DataType dt, DRegister rd, DRegister rn, const DOperand& operand) { + Vorr(al, dt, rd, rn, operand); + } + void Vorr(Condition cond, + DRegister rd, + DRegister rn, + const DOperand& operand) { + Vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + void Vorr(DRegister rd, DRegister rn, const DOperand& operand) { + Vorr(al, kDataTypeValueNone, rd, rn, operand); + } + + void Vorr(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vorr(cond, dt, rd, rn, operand); + } + void Vorr(DataType dt, QRegister rd, QRegister rn, const QOperand& operand) { + Vorr(al, dt, rd, rn, operand); + } + void Vorr(Condition cond, + QRegister rd, + QRegister rn, + const QOperand& operand) { + Vorr(cond, kDataTypeValueNone, rd, rn, operand); + } + void Vorr(QRegister rd, QRegister rn, const QOperand& operand) { + Vorr(al, kDataTypeValueNone, rd, rn, operand); + } + + void Vpadal(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpadal(cond, dt, rd, rm); + } + void Vpadal(DataType dt, DRegister rd, DRegister rm) { + Vpadal(al, dt, rd, rm); + } + + void Vpadal(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpadal(cond, dt, rd, rm); + } + void Vpadal(DataType dt, QRegister rd, QRegister rm) { + Vpadal(al, dt, rd, rm); + } + + void Vpadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpadd(cond, dt, rd, rn, rm); + } + void Vpadd(DataType 
dt, DRegister rd, DRegister rn, DRegister rm) { + Vpadd(al, dt, rd, rn, rm); + } + + void Vpaddl(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpaddl(cond, dt, rd, rm); + } + void Vpaddl(DataType dt, DRegister rd, DRegister rm) { + Vpaddl(al, dt, rd, rm); + } + + void Vpaddl(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpaddl(cond, dt, rd, rm); + } + void Vpaddl(DataType dt, QRegister rd, QRegister rm) { + Vpaddl(al, dt, rd, rm); + } + + void Vpmax( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpmax(cond, dt, rd, rn, rm); + } + void Vpmax(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vpmax(al, dt, rd, rn, rm); + } + + void Vpmin( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpmin(cond, dt, rd, rn, rm); + } + void Vpmin(DataType dt, 
DRegister rd, DRegister rn, DRegister rm) { + Vpmin(al, dt, rd, rn, rm); + } + + void Vpop(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpop(cond, dt, dreglist); + } + void Vpop(DataType dt, DRegisterList dreglist) { Vpop(al, dt, dreglist); } + void Vpop(Condition cond, DRegisterList dreglist) { + Vpop(cond, kDataTypeValueNone, dreglist); + } + void Vpop(DRegisterList dreglist) { Vpop(al, kDataTypeValueNone, dreglist); } + + void Vpop(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpop(cond, dt, sreglist); + } + void Vpop(DataType dt, SRegisterList sreglist) { Vpop(al, dt, sreglist); } + void Vpop(Condition cond, SRegisterList sreglist) { + Vpop(cond, kDataTypeValueNone, sreglist); + } + void Vpop(SRegisterList sreglist) { Vpop(al, kDataTypeValueNone, sreglist); } + + void Vpush(Condition cond, DataType dt, DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpush(cond, dt, dreglist); + } + void Vpush(DataType dt, DRegisterList dreglist) { Vpush(al, dt, dreglist); } + void Vpush(Condition cond, DRegisterList dreglist) { + Vpush(cond, kDataTypeValueNone, dreglist); + } + void Vpush(DRegisterList dreglist) { + Vpush(al, kDataTypeValueNone, dreglist); + } + + void Vpush(Condition cond, DataType dt, SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vpush(cond, dt, sreglist); + } + void Vpush(DataType dt, SRegisterList sreglist) { Vpush(al, dt, sreglist); } + void Vpush(Condition cond, SRegisterList sreglist) { + Vpush(cond, kDataTypeValueNone, sreglist); + } + void Vpush(SRegisterList sreglist) { + Vpush(al, kDataTypeValueNone, sreglist); + } + + void Vqabs(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqabs(cond, dt, rd, rm); + } + void Vqabs(DataType dt, DRegister rd, DRegister rm) { Vqabs(al, dt, rd, rm); } + + void Vqabs(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqabs(cond, dt, rd, rm); + } + void Vqabs(DataType dt, QRegister rd, QRegister rm) { Vqabs(al, dt, rd, rm); } + + void Vqadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqadd(cond, dt, rd, rn, rm); + } + void Vqadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqadd(al, dt, rd, rn, rm); + } + + void Vqadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqadd(cond, dt, rd, rn, rm); + } + void Vqadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqadd(al, dt, rd, rn, rm); + } + + void Vqdmlal( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlal(cond, dt, rd, rn, rm); + } + void Vqdmlal(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vqdmlal(al, dt, rd, rn, rm); + } + + void Vqdmlal(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlal(cond, dt, rd, rn, dm, index); + } + void Vqdmlal( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + Vqdmlal(al, dt, rd, rn, dm, index); + } + + void Vqdmlsl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlsl(cond, dt, rd, rn, rm); + } + 
void Vqdmlsl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vqdmlsl(al, dt, rd, rn, rm); + } + + void Vqdmlsl(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegister dm, + unsigned index) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmlsl(cond, dt, rd, rn, dm, index); + } + void Vqdmlsl( + DataType dt, QRegister rd, DRegister rn, DRegister dm, unsigned index) { + Vqdmlsl(al, dt, rd, rn, dm, index); + } + + void Vqdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmulh(cond, dt, rd, rn, rm); + } + void Vqdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + Vqdmulh(al, dt, rd, rn, rm); + } + + void Vqdmull( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmull(cond, dt, rd, rn, rm); + } + void Vqdmull(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vqdmull(al, dt, rd, rn, rm); + } + + void Vqdmull(Condition cond, + DataType dt, + QRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqdmull(cond, dt, rd, rn, rm); + } + void Vqdmull(DataType dt, 
QRegister rd, DRegister rn, DRegisterLane rm) { + Vqdmull(al, dt, rd, rn, rm); + } + + void Vqmovn(Condition cond, DataType dt, DRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqmovn(cond, dt, rd, rm); + } + void Vqmovn(DataType dt, DRegister rd, QRegister rm) { + Vqmovn(al, dt, rd, rm); + } + + void Vqmovun(Condition cond, DataType dt, DRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqmovun(cond, dt, rd, rm); + } + void Vqmovun(DataType dt, DRegister rd, QRegister rm) { + Vqmovun(al, dt, rd, rm); + } + + void Vqneg(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqneg(cond, dt, rd, rm); + } + void Vqneg(DataType dt, DRegister rd, DRegister rm) { Vqneg(al, dt, rd, rm); } + + void Vqneg(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqneg(cond, dt, rd, rm); + } + void Vqneg(DataType dt, QRegister rd, QRegister rm) { Vqneg(al, dt, rd, rm); } + + void Vqrdmulh( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrdmulh( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrdmulh(Condition cond, + DataType dt, + DRegister rd, + DRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, DRegister rd, DRegister rn, DRegisterLane rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrdmulh(Condition cond, + DataType dt, + QRegister rd, + QRegister rn, + DRegisterLane rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + 
vqrdmulh(cond, dt, rd, rn, rm); + } + void Vqrdmulh(DataType dt, QRegister rd, QRegister rn, DRegisterLane rm) { + Vqrdmulh(al, dt, rd, rn, rm); + } + + void Vqrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshl(cond, dt, rd, rm, rn); + } + void Vqrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + Vqrshl(al, dt, rd, rm, rn); + } + + void Vqrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshl(cond, dt, rd, rm, rn); + } + void Vqrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + Vqrshl(al, dt, rd, rm, rn); + } + + void Vqrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshrn(cond, dt, rd, rm, operand); + } + void Vqrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vqrshrn(al, dt, rd, rm, operand); + } + + void Vqrshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqrshrun(cond, dt, rd, rm, operand); + } + void Vqrshrun(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vqrshrun(al, dt, rd, rm, operand); + } + + void Vqshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshl(cond, dt, rd, rm, operand); + } + void Vqshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vqshl(al, dt, rd, rm, operand); + } + + void Vqshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshl(cond, dt, rd, rm, operand); + } + void Vqshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vqshl(al, dt, rd, rm, operand); + } + + void Vqshlu(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshlu(cond, dt, rd, rm, operand); + } + void Vqshlu(DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + Vqshlu(al, dt, rd, rm, operand); + } + + void Vqshlu(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshlu(cond, dt, rd, rm, operand); + } + void Vqshlu(DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + Vqshlu(al, dt, rd, rm, operand); + } + + void Vqshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshrn(cond, dt, rd, rm, operand); + } + void Vqshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vqshrn(al, dt, rd, rm, operand); + } + + void Vqshrun(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqshrun(cond, dt, rd, rm, operand); + } + void Vqshrun(DataType dt, + DRegister rd, + QRegister rm, + 
const QOperand& operand) { + Vqshrun(al, dt, rd, rm, operand); + } + + void Vqsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqsub(cond, dt, rd, rn, rm); + } + void Vqsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vqsub(al, dt, rd, rn, rm); + } + + void Vqsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vqsub(cond, dt, rd, rn, rm); + } + void Vqsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vqsub(al, dt, rd, rn, rm); + } + + void Vraddhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vraddhn(cond, dt, rd, rn, rm); + } + void Vraddhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + Vraddhn(al, dt, rd, rn, rm); + } + + void Vrecpe(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + 
ITScope it_scope(this, &cond, guard); + vrecpe(cond, dt, rd, rm); + } + void Vrecpe(DataType dt, DRegister rd, DRegister rm) { + Vrecpe(al, dt, rd, rm); + } + + void Vrecpe(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrecpe(cond, dt, rd, rm); + } + void Vrecpe(DataType dt, QRegister rd, QRegister rm) { + Vrecpe(al, dt, rd, rm); + } + + void Vrecps( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrecps(cond, dt, rd, rn, rm); + } + void Vrecps(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vrecps(al, dt, rd, rn, rm); + } + + void Vrecps( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrecps(cond, dt, rd, rn, rm); + } + void Vrecps(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vrecps(al, dt, rd, rn, rm); + } + + void Vrev16(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + 
ITScope it_scope(this, &cond, guard); + vrev16(cond, dt, rd, rm); + } + void Vrev16(DataType dt, DRegister rd, DRegister rm) { + Vrev16(al, dt, rd, rm); + } + + void Vrev16(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev16(cond, dt, rd, rm); + } + void Vrev16(DataType dt, QRegister rd, QRegister rm) { + Vrev16(al, dt, rd, rm); + } + + void Vrev32(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev32(cond, dt, rd, rm); + } + void Vrev32(DataType dt, DRegister rd, DRegister rm) { + Vrev32(al, dt, rd, rm); + } + + void Vrev32(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev32(cond, dt, rd, rm); + } + void Vrev32(DataType dt, QRegister rd, QRegister rm) { + Vrev32(al, dt, rd, rm); + } + + void Vrev64(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev64(cond, dt, rd, rm); + } + void Vrev64(DataType dt, DRegister rd, DRegister rm) { + Vrev64(al, dt, rd, rm); + } + + void Vrev64(Condition 
cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrev64(cond, dt, rd, rm); + } + void Vrev64(DataType dt, QRegister rd, QRegister rm) { + Vrev64(al, dt, rd, rm); + } + + void Vrhadd( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrhadd(cond, dt, rd, rn, rm); + } + void Vrhadd(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vrhadd(al, dt, rd, rn, rm); + } + + void Vrhadd( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrhadd(cond, dt, rd, rn, rm); + } + void Vrhadd(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vrhadd(al, dt, rd, rn, rm); + } + + void Vrinta(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrinta(dt, rd, rm); + } + + void Vrinta(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrinta(dt, rd, rm); + } + + void Vrinta(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrinta(dt, rd, rm); + } + + void Vrintm(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintm(dt, rd, rm); + } + + void Vrintm(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintm(dt, rd, rm); + } + + void Vrintm(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintm(dt, rd, rm); + } + + void Vrintn(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintn(dt, rd, rm); + } + + void Vrintn(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintn(dt, rd, rm); + } + + void Vrintn(DataType dt, 
SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintn(dt, rd, rm); + } + + void Vrintp(DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintp(dt, rd, rm); + } + + void Vrintp(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintp(dt, rd, rm); + } + + void Vrintp(DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintp(dt, rd, rm); + } + + void Vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintr(cond, dt, rd, rm); + } + void Vrintr(DataType dt, SRegister rd, SRegister rm) { + Vrintr(al, dt, rd, rm); + } + + void Vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintr(cond, dt, 
rd, rm); + } + void Vrintr(DataType dt, DRegister rd, DRegister rm) { + Vrintr(al, dt, rd, rm); + } + + void Vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintx(cond, dt, rd, rm); + } + void Vrintx(DataType dt, DRegister rd, DRegister rm) { + Vrintx(al, dt, rd, rm); + } + + void Vrintx(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintx(dt, rd, rm); + } + + void Vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintx(cond, dt, rd, rm); + } + void Vrintx(DataType dt, SRegister rd, SRegister rm) { + Vrintx(al, dt, rd, rm); + } + + void Vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintz(cond, dt, rd, rm); + } + void Vrintz(DataType dt, DRegister rd, DRegister rm) { + Vrintz(al, dt, rd, rm); + } + + void Vrintz(DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vrintz(dt, rd, rm); + } + + void Vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrintz(cond, dt, rd, rm); + } + void Vrintz(DataType dt, SRegister rd, SRegister rm) { + Vrintz(al, dt, rd, rm); + } + + void Vrshl( + Condition cond, DataType dt, DRegister rd, DRegister rm, DRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshl(cond, dt, rd, rm, rn); + } + void Vrshl(DataType dt, DRegister rd, DRegister rm, DRegister rn) { + Vrshl(al, dt, rd, rm, rn); + } + + void Vrshl( + Condition cond, DataType dt, QRegister rd, QRegister rm, QRegister rn) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshl(cond, dt, rd, rm, rn); + } + void Vrshl(DataType dt, QRegister rd, QRegister rm, QRegister rn) { + Vrshl(al, dt, rd, rm, rn); + } + + void Vrshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + 
MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshr(cond, dt, rd, rm, operand); + } + void Vrshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vrshr(al, dt, rd, rm, operand); + } + + void Vrshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshr(cond, dt, rd, rm, operand); + } + void Vrshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vrshr(al, dt, rd, rm, operand); + } + + void Vrshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrshrn(cond, dt, rd, rm, operand); + } + void Vrshrn(DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + Vrshrn(al, dt, rd, rm, operand); + } + + void Vrsqrte(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrte(cond, dt, rd, rm); + } + void Vrsqrte(DataType dt, DRegister rd, DRegister rm) { + Vrsqrte(al, dt, rd, rm); + } + + void Vrsqrte(Condition cond, DataType dt, QRegister rd, QRegister rm) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrte(cond, dt, rd, rm); + } + void Vrsqrte(DataType dt, QRegister rd, QRegister rm) { + Vrsqrte(al, dt, rd, rm); + } + + void Vrsqrts( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrts(cond, dt, rd, rn, rm); + } + void Vrsqrts(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vrsqrts(al, dt, rd, rn, rm); + } + + void Vrsqrts( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsqrts(cond, dt, rd, rn, rm); + } + void Vrsqrts(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vrsqrts(al, dt, rd, rn, rm); + } + + void Vrsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsra(cond, dt, rd, rm, operand); + } + void Vrsra(DataType dt, DRegister rd, DRegister rm, 
const DOperand& operand) { + Vrsra(al, dt, rd, rm, operand); + } + + void Vrsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsra(cond, dt, rd, rm, operand); + } + void Vrsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vrsra(al, dt, rd, rm, operand); + } + + void Vrsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vrsubhn(cond, dt, rd, rn, rm); + } + void Vrsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + Vrsubhn(al, dt, rd, rn, rm); + } + + void Vseleq(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vseleq(dt, rd, rn, rm); + } + + void Vseleq(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vseleq(dt, rd, rn, rm); + } + + void Vselge(DataType dt, DRegister rd, 
DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselge(dt, rd, rn, rm); + } + + void Vselge(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselge(dt, rd, rn, rm); + } + + void Vselgt(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselgt(dt, rd, rn, rm); + } + + void Vselgt(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselgt(dt, rd, rn, rm); + } + + void Vselvs(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselvs(dt, rd, rn, rm); + } + + void Vselvs(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + vselvs(dt, rd, rn, rm); + } + + void Vshl(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshl(cond, dt, rd, rm, operand); + } + void Vshl(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vshl(al, dt, rd, rm, operand); + } + + void Vshl(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshl(cond, dt, rd, rm, operand); + } + void Vshl(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vshl(al, dt, rd, rm, operand); + } + + void Vshll(Condition cond, + DataType dt, + QRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshll(cond, dt, rd, rm, operand); + } + void Vshll(DataType dt, QRegister rd, DRegister rm, const DOperand& operand) { + Vshll(al, dt, rd, rm, operand); + } + + 
void Vshr(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshr(cond, dt, rd, rm, operand); + } + void Vshr(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vshr(al, dt, rd, rm, operand); + } + + void Vshr(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshr(cond, dt, rd, rm, operand); + } + void Vshr(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vshr(al, dt, rd, rm, operand); + } + + void Vshrn(Condition cond, + DataType dt, + DRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vshrn(cond, dt, rd, rm, operand); + } + void Vshrn(DataType dt, DRegister rd, QRegister rm, const QOperand& operand) { + Vshrn(al, dt, rd, rm, operand); + } + + void Vsli(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsli(cond, dt, rd, rm, operand); + } + void Vsli(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vsli(al, dt, rd, rm, operand); + } + + void Vsli(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsli(cond, dt, rd, rm, operand); + } + void Vsli(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vsli(al, dt, rd, rm, operand); + } + + void Vsqrt(Condition cond, DataType dt, SRegister rd, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsqrt(cond, dt, rd, rm); + } + void Vsqrt(DataType dt, SRegister rd, SRegister rm) { Vsqrt(al, dt, rd, rm); } + + void Vsqrt(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsqrt(cond, dt, rd, rm); + } + void Vsqrt(DataType dt, DRegister rd, DRegister rm) { Vsqrt(al, dt, rd, rm); } + + void Vsra(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsra(cond, dt, rd, rm, operand); + } + void Vsra(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vsra(al, dt, rd, rm, operand); + } + + void Vsra(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsra(cond, dt, rd, rm, operand); + } + void Vsra(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vsra(al, dt, rd, rm, operand); + } + + void Vsri(Condition cond, + DataType dt, + DRegister rd, + DRegister rm, + const DOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsri(cond, dt, rd, rm, operand); + } + void Vsri(DataType dt, DRegister rd, DRegister rm, const DOperand& operand) { + Vsri(al, dt, rd, rm, operand); + } + + void Vsri(Condition cond, + DataType dt, + QRegister rd, + QRegister rm, + const QOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, 
guard); + vsri(cond, dt, rd, rm, operand); + } + void Vsri(DataType dt, QRegister rd, QRegister rm, const QOperand& operand) { + Vsri(al, dt, rd, rm, operand); + } + + void Vst1(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst1(cond, dt, nreglist, operand); + } + void Vst1(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst1(al, dt, nreglist, operand); + } + + void Vst2(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst2(cond, dt, nreglist, operand); + } + void Vst2(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst2(al, dt, nreglist, operand); + } + + void Vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst3(cond, dt, nreglist, operand); + } + void Vst3(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst3(al, dt, nreglist, operand); + } + + void Vst3(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst3(cond, dt, nreglist, operand); + } + void Vst3(DataType dt, + const NeonRegisterList& nreglist, + const MemOperand& operand) { + Vst3(al, dt, nreglist, operand); + } + + void Vst4(Condition cond, + DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vst4(cond, dt, nreglist, operand); + } + void Vst4(DataType dt, + const NeonRegisterList& nreglist, + const AlignedMemOperand& operand) { + Vst4(al, dt, nreglist, operand); + } + + void Vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstm(cond, dt, rn, write_back, dreglist); + } + void Vstm(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstm(al, dt, rn, write_back, dreglist); + } + void Vstm(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstm(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vstm(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vstm(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vstm(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstm(cond, dt, rn, write_back, sreglist); + } + void Vstm(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstm(al, dt, rn, write_back, sreglist); + } + void Vstm(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstm(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vstm(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vstm(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstmdb(cond, dt, rn, write_back, dreglist); + } + void Vstmdb(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmdb(al, dt, rn, write_back, dreglist); + } + void Vstmdb(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmdb(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vstmdb(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vstmdb(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vstmdb(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + 
vstmdb(cond, dt, rn, write_back, sreglist); + } + void Vstmdb(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstmdb(al, dt, rn, write_back, sreglist); + } + void Vstmdb(Condition cond, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstmdb(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vstmdb(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vstmdb(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(dreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstmia(cond, dt, rn, write_back, dreglist); + } + void Vstmia(DataType dt, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmia(al, dt, rn, write_back, dreglist); + } + void Vstmia(Condition cond, + Register rn, + WriteBack write_back, + DRegisterList dreglist) { + Vstmia(cond, kDataTypeValueNone, rn, write_back, dreglist); + } + void Vstmia(Register rn, WriteBack write_back, DRegisterList dreglist) { + Vstmia(al, kDataTypeValueNone, rn, write_back, dreglist); + } + + void Vstmia(Condition cond, + DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(sreglist)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstmia(cond, dt, rn, write_back, sreglist); + } + void Vstmia(DataType dt, + Register rn, + WriteBack write_back, + SRegisterList sreglist) { + Vstmia(al, dt, rn, write_back, sreglist); + } + void Vstmia(Condition cond, + Register rn, + WriteBack 
write_back, + SRegisterList sreglist) { + Vstmia(cond, kDataTypeValueNone, rn, write_back, sreglist); + } + void Vstmia(Register rn, WriteBack write_back, SRegisterList sreglist) { + Vstmia(al, kDataTypeValueNone, rn, write_back, sreglist); + } + + void Vstr(Condition cond, + DataType dt, + DRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstr(cond, dt, rd, operand); + } + void Vstr(DataType dt, DRegister rd, const MemOperand& operand) { + Vstr(al, dt, rd, operand); + } + void Vstr(Condition cond, DRegister rd, const MemOperand& operand) { + Vstr(cond, Untyped64, rd, operand); + } + void Vstr(DRegister rd, const MemOperand& operand) { + Vstr(al, Untyped64, rd, operand); + } + + void Vstr(Condition cond, + DataType dt, + SRegister rd, + const MemOperand& operand) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(operand)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vstr(cond, dt, rd, operand); + } + void Vstr(DataType dt, SRegister rd, const MemOperand& operand) { + Vstr(al, dt, rd, operand); + } + void Vstr(Condition cond, SRegister rd, const MemOperand& operand) { + Vstr(cond, Untyped32, rd, operand); + } + void Vstr(SRegister rd, const MemOperand& operand) { + Vstr(al, Untyped32, rd, operand); + } + + void Vsub( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope 
guard(this); + ITScope it_scope(this, &cond, guard); + vsub(cond, dt, rd, rn, rm); + } + void Vsub(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vsub(al, dt, rd, rn, rm); + } + + void Vsub( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsub(cond, dt, rd, rn, rm); + } + void Vsub(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vsub(al, dt, rd, rn, rm); + } + + void Vsub( + Condition cond, DataType dt, SRegister rd, SRegister rn, SRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsub(cond, dt, rd, rn, rm); + } + void Vsub(DataType dt, SRegister rd, SRegister rn, SRegister rm) { + Vsub(al, dt, rd, rn, rm); + } + + void Vsubhn( + Condition cond, DataType dt, DRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsubhn(cond, dt, rd, rn, rm); + } + void Vsubhn(DataType dt, DRegister rd, QRegister rn, QRegister rm) { + Vsubhn(al, dt, rd, rn, rm); + } + + void Vsubl( + Condition cond, DataType dt, QRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsubl(cond, dt, rd, rn, rm); + } + void Vsubl(DataType dt, QRegister rd, DRegister rn, DRegister rm) { + Vsubl(al, dt, rd, rn, rm); + } + + void Vsubw( + Condition cond, DataType dt, QRegister rd, QRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vsubw(cond, dt, rd, rn, rm); + } + void Vsubw(DataType dt, QRegister rd, QRegister rn, DRegister rm) { + Vsubw(al, dt, rd, rn, rm); + } + + void Vswp(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vswp(cond, dt, rd, rm); + } + void Vswp(DataType dt, DRegister rd, DRegister rm) { Vswp(al, dt, rd, rm); } + void Vswp(Condition cond, DRegister rd, DRegister rm) { + Vswp(cond, kDataTypeValueNone, rd, rm); + } + void Vswp(DRegister rd, DRegister rm) { + Vswp(al, kDataTypeValueNone, rd, rm); + } + + void Vswp(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vswp(cond, dt, rd, rm); + } + void Vswp(DataType dt, QRegister rd, QRegister rm) { Vswp(al, 
dt, rd, rm); } + void Vswp(Condition cond, QRegister rd, QRegister rm) { + Vswp(cond, kDataTypeValueNone, rd, rm); + } + void Vswp(QRegister rd, QRegister rm) { + Vswp(al, kDataTypeValueNone, rd, rm); + } + + void Vtbl(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtbl(cond, dt, rd, nreglist, rm); + } + void Vtbl(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + Vtbl(al, dt, rd, nreglist, rm); + } + + void Vtbx(Condition cond, + DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(nreglist)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtbx(cond, dt, rd, nreglist, rm); + } + void Vtbx(DataType dt, + DRegister rd, + const NeonRegisterList& nreglist, + DRegister rm) { + Vtbx(al, dt, rd, nreglist, rm); + } + + void Vtrn(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtrn(cond, dt, rd, rm); + } + void Vtrn(DataType dt, DRegister rd, DRegister rm) { Vtrn(al, dt, rd, rm); } + + void Vtrn(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtrn(cond, dt, rd, rm); + } + void Vtrn(DataType dt, QRegister rd, QRegister rm) { Vtrn(al, dt, rd, rm); } + + void Vtst( + Condition cond, DataType dt, DRegister rd, DRegister rn, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtst(cond, dt, rd, rn, rm); + } + void Vtst(DataType dt, DRegister rd, DRegister rn, DRegister rm) { + Vtst(al, dt, rd, rn, rm); + } + + void Vtst( + Condition cond, DataType dt, QRegister rd, QRegister rn, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rn)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vtst(cond, dt, rd, rn, rm); + } + void Vtst(DataType dt, QRegister rd, QRegister rn, QRegister rm) { + Vtst(al, dt, rd, rn, rm); + } + + void Vuzp(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vuzp(cond, dt, rd, rm); + } + void Vuzp(DataType dt, DRegister rd, DRegister rm) { Vuzp(al, dt, rd, rm); } + + void Vuzp(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + 
VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vuzp(cond, dt, rd, rm); + } + void Vuzp(DataType dt, QRegister rd, QRegister rm) { Vuzp(al, dt, rd, rm); } + + void Vzip(Condition cond, DataType dt, DRegister rd, DRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vzip(cond, dt, rd, rm); + } + void Vzip(DataType dt, DRegister rd, DRegister rm) { Vzip(al, dt, rd, rm); } + + void Vzip(Condition cond, DataType dt, QRegister rd, QRegister rm) { + VIXL_ASSERT(!AliasesAvailableScratchRegister(rd)); + VIXL_ASSERT(!AliasesAvailableScratchRegister(rm)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + vzip(cond, dt, rd, rm); + } + void Vzip(DataType dt, QRegister rd, QRegister rm) { Vzip(al, dt, rd, rm); } + + void Yield(Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(OutsideITBlock()); + MacroEmissionCheckScope guard(this); + ITScope it_scope(this, &cond, guard); + yield(cond); + } + void Yield() { Yield(al); } + void Vabs(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vabs(cond, F32, rd.S(), rm.S()); + } else { + Vabs(cond, F64, rd.D(), rm.D()); + } + } + void Vabs(VRegister rd, VRegister rm) { Vabs(al, rd, rm); } + void Vadd(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vadd(cond, F32, rd.S(), rn.S(), 
rm.S()); + } else { + Vadd(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vadd(VRegister rd, VRegister rn, VRegister rm) { Vadd(al, rd, rn, rm); } + void Vcmp(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vcmp(cond, F32, rd.S(), rm.S()); + } else { + Vcmp(cond, F64, rd.D(), rm.D()); + } + } + void Vcmp(VRegister rd, VRegister rm) { Vcmp(al, rd, rm); } + void Vcmpe(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vcmpe(cond, F32, rd.S(), rm.S()); + } else { + Vcmpe(cond, F64, rd.D(), rm.D()); + } + } + void Vcmpe(VRegister rd, VRegister rm) { Vcmpe(al, rd, rm); } + void Vdiv(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vdiv(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vdiv(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vdiv(VRegister rd, VRegister rn, VRegister rm) { Vdiv(al, rd, rn, rm); } + void Vfma(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfma(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfma(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfma(VRegister rd, VRegister rn, VRegister rm) { Vfma(al, rd, rn, rm); } + void Vfms(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfms(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfms(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfms(VRegister rd, VRegister rn, VRegister rm) { Vfms(al, rd, rn, rm); } + void 
Vfnma(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfnma(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfnma(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfnma(VRegister rd, VRegister rn, VRegister rm) { + Vfnma(al, rd, rn, rm); + } + void Vfnms(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vfnms(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vfnms(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vfnms(VRegister rd, VRegister rn, VRegister rm) { + Vfnms(al, rd, rn, rm); + } + void Vmaxnm(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmaxnm(F32, rd.S(), rn.S(), rm.S()); + } else { + Vmaxnm(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vminnm(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vminnm(F32, rd.S(), rn.S(), rm.S()); + } else { + Vminnm(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmla(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmla(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vmla(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmla(VRegister rd, VRegister rn, VRegister rm) { Vmla(al, rd, rn, rm); } + void Vmls(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() 
== rm.GetType()); + if (rd.IsS()) { + Vmls(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vmls(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmls(VRegister rd, VRegister rn, VRegister rm) { Vmls(al, rd, rn, rm); } + void Vmov(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmov(cond, F32, rd.S(), rm.S()); + } else { + Vmov(cond, F64, rd.D(), rm.D()); + } + } + void Vmov(VRegister rd, VRegister rm) { Vmov(al, rd, rm); } + void Vmul(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vmul(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vmul(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vmul(VRegister rd, VRegister rn, VRegister rm) { Vmul(al, rd, rn, rm); } + void Vneg(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vneg(cond, F32, rd.S(), rm.S()); + } else { + Vneg(cond, F64, rd.D(), rm.D()); + } + } + void Vneg(VRegister rd, VRegister rm) { Vneg(al, rd, rm); } + void Vnmla(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vnmla(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vnmla(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vnmla(VRegister rd, VRegister rn, VRegister rm) { + Vnmla(al, rd, rn, rm); + } + void Vnmls(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vnmls(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vnmls(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vnmls(VRegister 
rd, VRegister rn, VRegister rm) { + Vnmls(al, rd, rn, rm); + } + void Vnmul(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vnmul(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vnmul(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vnmul(VRegister rd, VRegister rn, VRegister rm) { + Vnmul(al, rd, rn, rm); + } + void Vrinta(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrinta(F32, rd.S(), rm.S()); + } else { + Vrinta(F64, rd.D(), rm.D()); + } + } + void Vrintm(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintm(F32, rd.S(), rm.S()); + } else { + Vrintm(F64, rd.D(), rm.D()); + } + } + void Vrintn(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintn(F32, rd.S(), rm.S()); + } else { + Vrintn(F64, rd.D(), rm.D()); + } + } + void Vrintp(VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintp(F32, rd.S(), rm.S()); + } else { + Vrintp(F64, rd.D(), rm.D()); + } + } + void Vrintr(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintr(cond, F32, rd.S(), rm.S()); + } else { + Vrintr(cond, F64, rd.D(), rm.D()); + } + } + void Vrintr(VRegister rd, VRegister rm) { Vrintr(al, rd, rm); } + void Vrintx(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintx(cond, F32, rd.S(), rm.S()); + } else { + Vrintx(cond, F64, rd.D(), rm.D()); + } + } + void Vrintx(VRegister rd, VRegister rm) { 
Vrintx(al, rd, rm); } + void Vrintz(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vrintz(cond, F32, rd.S(), rm.S()); + } else { + Vrintz(cond, F64, rd.D(), rm.D()); + } + } + void Vrintz(VRegister rd, VRegister rm) { Vrintz(al, rd, rm); } + void Vseleq(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vseleq(F32, rd.S(), rn.S(), rm.S()); + } else { + Vseleq(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vselge(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vselge(F32, rd.S(), rn.S(), rm.S()); + } else { + Vselge(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vselgt(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vselgt(F32, rd.S(), rn.S(), rm.S()); + } else { + Vselgt(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vselvs(VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vselvs(F32, rd.S(), rn.S(), rm.S()); + } else { + Vselvs(F64, rd.D(), rn.D(), rm.D()); + } + } + void Vsqrt(Condition cond, VRegister rd, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vsqrt(cond, F32, rd.S(), rm.S()); + } else { + Vsqrt(cond, F64, rd.D(), rm.D()); + } + } + void Vsqrt(VRegister rd, VRegister rm) { Vsqrt(al, rd, rm); } + void Vsub(Condition cond, VRegister rd, VRegister rn, VRegister rm) { + VIXL_ASSERT(rd.IsS() || rd.IsD()); + VIXL_ASSERT(rd.GetType() == 
rn.GetType()); + VIXL_ASSERT(rd.GetType() == rm.GetType()); + if (rd.IsS()) { + Vsub(cond, F32, rd.S(), rn.S(), rm.S()); + } else { + Vsub(cond, F64, rd.D(), rn.D(), rm.D()); + } + } + void Vsub(VRegister rd, VRegister rn, VRegister rm) { Vsub(al, rd, rn, rm); } + // End of generated code. + + virtual bool AllowUnpredictable() VIXL_OVERRIDE { + VIXL_ABORT_WITH_MSG("Unpredictable instruction.\n"); + return false; + } + virtual bool AllowStronglyDiscouraged() VIXL_OVERRIDE { + VIXL_ABORT_WITH_MSG( + "ARM strongly recommends to not use this instruction.\n"); + return false; + } + // Old syntax of vrint instructions. + VIXL_DEPRECATED( + "void Vrinta(DataType dt, DRegister rd, DRegister rm)", + void Vrinta(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrinta(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrinta(DataType dt, QRegister rd, QRegister rm)", + void Vrinta(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrinta(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrinta(DataType dt, SRegister rd, SRegister rm)", + void Vrinta(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrinta(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintm(DataType dt, DRegister rd, DRegister rm)", + void Vrintm(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintm(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintm(DataType dt, QRegister rd, QRegister rm)", + void Vrintm(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintm(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintm(DataType dt, SRegister rd, SRegister rm)", + void Vrintm(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintm(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void 
Vrintn(DataType dt, DRegister rd, DRegister rm)", + void Vrintn(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintn(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintn(DataType dt, QRegister rd, QRegister rm)", + void Vrintn(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintn(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintn(DataType dt, SRegister rd, SRegister rm)", + void Vrintn(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintn(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintp(DataType dt, DRegister rd, DRegister rm)", + void Vrintp(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintp(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintp(DataType dt, QRegister rd, QRegister rm)", + void Vrintp(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintp(dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintp(DataType dt, SRegister rd, SRegister rm)", + void Vrintp(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintp(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintr(Condition cond, DataType dt, SRegister rd, SRegister rm)", + void Vrintr(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintr(DataType dt, SRegister rd, SRegister rm)", + void Vrintr(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintr(Condition cond, DataType dt, DRegister rd, DRegister rm)", + void Vrintr(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister 
rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintr(DataType dt, DRegister rd, DRegister rm)", + void Vrintr(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintr(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintx(Condition cond, DataType dt, DRegister rd, DRegister rm)", + void Vrintx(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintx(DataType dt, DRegister rd, DRegister rm)", + void Vrintx(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintx(DataType dt, QRegister rd, QRegister rm)", + void Vrintx(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintx(Condition cond, DataType dt, SRegister rd, SRegister rm)", + void Vrintx(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintx(DataType dt, SRegister rd, SRegister rm)", + void Vrintx(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintx(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintz(Condition cond, DataType dt, DRegister rd, DRegister rm)", + void Vrintz(Condition cond, + DataType dt1, + DataType dt2, + DRegister rd, + DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintz(DataType dt, DRegister rd, DRegister rm)", + void Vrintz(DataType dt1, DataType dt2, DRegister rd, DRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return 
Vrintz(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintz(DataType dt, QRegister rd, QRegister rm)", + void Vrintz(DataType dt1, DataType dt2, QRegister rd, QRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(dt1, rd, rm); + } + + VIXL_DEPRECATED( + "void Vrintz(Condition cond, DataType dt, SRegister rd, SRegister rm)", + void Vrintz(Condition cond, + DataType dt1, + DataType dt2, + SRegister rd, + SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(cond, dt1, rd, rm); + } + VIXL_DEPRECATED( + "void Vrintz(DataType dt, SRegister rd, SRegister rm)", + void Vrintz(DataType dt1, DataType dt2, SRegister rd, SRegister rm)) { + USE(dt2); + VIXL_ASSERT(dt1.Is(dt2)); + return Vrintz(dt1, rd, rm); + } + + private: + bool NeedBranch(Condition* cond) { return !cond->Is(al) && IsUsingT32(); } + static const int kBranchSize = kMaxInstructionSizeInBytes; + + RegisterList available_; + VRegisterList available_vfp_; + UseScratchRegisterScope* current_scratch_scope_; + MacroAssemblerContext context_; + PoolManager pool_manager_; + bool generate_simulator_code_; + bool allow_macro_instructions_; + Label* pool_end_; + + friend class TestMacroAssembler; +}; + +// This scope utility allows scratch registers to be managed safely. The +// MacroAssembler's GetScratchRegisterList() is used as a pool of scratch +// registers. These registers can be allocated on demand, and will be returned +// at the end of the scope. +// +// When the scope ends, the MacroAssembler's lists will be restored to their +// original state, even if the lists were modified by some other means. +// +// Scopes must nest perfectly. That is, they must be destructed in reverse +// construction order. Otherwise, it is not clear how to handle cases where one +// scope acquires a register that was included in a now-closing scope. With +// perfect nesting, this cannot occur. 
+class UseScratchRegisterScope { + public: + // This constructor implicitly calls the `Open` function to initialise the + // scope, so it is ready to use immediately after it has been constructed. + explicit UseScratchRegisterScope(MacroAssembler* masm) + : masm_(NULL), parent_(NULL), old_available_(0), old_available_vfp_(0) { + Open(masm); + } + // This constructor allows deferred and optional initialisation of the scope. + // The user is required to explicitly call the `Open` function before using + // the scope. + UseScratchRegisterScope() + : masm_(NULL), parent_(NULL), old_available_(0), old_available_vfp_(0) {} + + // This function performs the actual initialisation work. + void Open(MacroAssembler* masm); + + // The destructor always implicitly calls the `Close` function. + ~UseScratchRegisterScope() { Close(); } + + // This function performs the cleaning-up work. It must succeed even if the + // scope has not been opened. It is safe to call multiple times. + void Close(); + + bool IsAvailable(const Register& reg) const; + bool IsAvailable(const VRegister& reg) const; + + // Take a register from the temp list. It will be returned automatically when + // the scope ends. + Register Acquire(); + VRegister AcquireV(unsigned size_in_bits); + QRegister AcquireQ(); + DRegister AcquireD(); + SRegister AcquireS(); + + // Explicitly release an acquired (or excluded) register, putting it back in + // the temp list. + void Release(const Register& reg); + void Release(const VRegister& reg); + + // Make the specified registers available as scratch registers for the + // duration of this scope. 
+ void Include(const RegisterList& list); + void Include(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg) { + Include(RegisterList(reg1, reg2, reg3, reg4)); + } + void Include(const VRegisterList& list); + void Include(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg) { + Include(VRegisterList(reg1, reg2, reg3, reg4)); + } + + // Make sure that the specified registers are not available in this scope. + // This can be used to prevent helper functions from using sensitive + // registers, for example. + void Exclude(const RegisterList& list); + void Exclude(const Register& reg1, + const Register& reg2 = NoReg, + const Register& reg3 = NoReg, + const Register& reg4 = NoReg) { + Exclude(RegisterList(reg1, reg2, reg3, reg4)); + } + void Exclude(const VRegisterList& list); + void Exclude(const VRegister& reg1, + const VRegister& reg2 = NoVReg, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg) { + Exclude(VRegisterList(reg1, reg2, reg3, reg4)); + } + + // A convenience helper to exclude any registers used by the operand. + void Exclude(const Operand& operand); + + // Prevent any scratch registers from being used in this scope. + void ExcludeAll(); + + private: + // The MacroAssembler maintains a list of available scratch registers, and + // also keeps track of the most recently-opened scope so that on destruction + // we can check that scopes do not outlive their parents. + MacroAssembler* masm_; + UseScratchRegisterScope* parent_; + + // The state of the available lists at the start of this scope. 
+ uint32_t old_available_; // kRRegister + uint64_t old_available_vfp_; // kVRegister + + VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } + VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) { + VIXL_UNREACHABLE(); + } +}; + + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_AARCH32_MACRO_ASSEMBLER_AARCH32_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.cc new file mode 100644 index 00000000..bbfda159 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.cc @@ -0,0 +1,563 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +extern "C" { +#include +#include +} + +#include +#include +#include +#include +#include +#include +#include + +#include "../utils-vixl.h" +#include "aarch32/constants-aarch32.h" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/operands-aarch32.h" + +namespace vixl { +namespace aarch32 { + +// Operand + +std::ostream& operator<<(std::ostream& os, const Operand& operand) { + if (operand.IsImmediate()) { + return os << "#" << operand.GetImmediate(); + } + if (operand.IsImmediateShiftedRegister()) { + if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) && + (operand.GetShiftAmount() == 0)) { + return os << operand.GetBaseRegister(); + } + if (operand.GetShift().IsRRX()) { + return os << operand.GetBaseRegister() << ", rrx"; + } + return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " #" + << operand.GetShiftAmount(); + } + if (operand.IsRegisterShiftedRegister()) { + return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " " + << operand.GetShiftRegister(); + } + VIXL_UNREACHABLE(); + return os; +} + +std::ostream& operator<<(std::ostream& os, const NeonImmediate& neon_imm) { + if (neon_imm.IsDouble()) { + if (neon_imm.imm_.d_ == 0) { + if (copysign(1.0, neon_imm.imm_.d_) < 0.0) { + return os << "#-0.0"; + } + return os << "#0.0"; + } + return os << "#" << std::setprecision(9) << neon_imm.imm_.d_; + } + if (neon_imm.IsFloat()) { + if (neon_imm.imm_.f_ == 0) { + if 
(copysign(1.0, neon_imm.imm_.d_) < 0.0) return os << "#-0.0"; + return os << "#0.0"; + } + return os << "#" << std::setprecision(9) << neon_imm.imm_.f_; + } + if (neon_imm.IsInteger64()) { + return os << "#0x" << std::hex << std::setw(16) << std::setfill('0') + << neon_imm.imm_.u64_ << std::dec; + } + return os << "#" << neon_imm.imm_.u32_; +} + +// SOperand + +std::ostream& operator<<(std::ostream& os, const SOperand& operand) { + if (operand.IsImmediate()) { + return os << operand.GetNeonImmediate(); + } + return os << operand.GetRegister(); +} + +// DOperand + +std::ostream& operator<<(std::ostream& os, const DOperand& operand) { + if (operand.IsImmediate()) { + return os << operand.GetNeonImmediate(); + } + return os << operand.GetRegister(); +} + +// QOperand + +std::ostream& operator<<(std::ostream& os, const QOperand& operand) { + if (operand.IsImmediate()) { + return os << operand.GetNeonImmediate(); + } + return os << operand.GetRegister(); +} + + +ImmediateVbic::ImmediateVbic(DataType dt, const NeonImmediate& neon_imm) { + if (neon_imm.IsInteger32()) { + uint32_t immediate = neon_imm.GetImmediate(); + if (dt.GetValue() == I16) { + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x9); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0xb); + SetEncodedImmediate(immediate >> 8); + } + } else if (dt.GetValue() == I32) { + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x1); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0x3); + SetEncodedImmediate(immediate >> 8); + } else if ((immediate & ~0xff0000) == 0) { + SetEncodingValue(0x5); + SetEncodedImmediate(immediate >> 16); + } else if ((immediate & ~0xff000000) == 0) { + SetEncodingValue(0x7); + SetEncodedImmediate(immediate >> 24); + } + } + } +} + + +DataType ImmediateVbic::DecodeDt(uint32_t cmode) { + switch (cmode) { + case 0x1: + case 0x3: + case 0x5: + case 0x7: + return I32; + case 0x9: + case 0xb: + 
return I16; + default: + break; + } + VIXL_UNREACHABLE(); + return kDataTypeValueInvalid; +} + + +NeonImmediate ImmediateVbic::DecodeImmediate(uint32_t cmode, + uint32_t immediate) { + switch (cmode) { + case 0x1: + case 0x9: + return immediate; + case 0x3: + case 0xb: + return immediate << 8; + case 0x5: + return immediate << 16; + case 0x7: + return immediate << 24; + default: + break; + } + VIXL_UNREACHABLE(); + return 0; +} + + +ImmediateVmov::ImmediateVmov(DataType dt, const NeonImmediate& neon_imm) { + if (neon_imm.IsInteger()) { + switch (dt.GetValue()) { + case I8: + if (neon_imm.CanConvert()) { + SetEncodingValue(0xe); + SetEncodedImmediate(neon_imm.GetImmediate()); + } + break; + case I16: + if (neon_imm.IsInteger32()) { + uint32_t immediate = neon_imm.GetImmediate(); + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x8); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0xa); + SetEncodedImmediate(immediate >> 8); + } + } + break; + case I32: + if (neon_imm.IsInteger32()) { + uint32_t immediate = neon_imm.GetImmediate(); + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x0); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0x2); + SetEncodedImmediate(immediate >> 8); + } else if ((immediate & ~0xff0000) == 0) { + SetEncodingValue(0x4); + SetEncodedImmediate(immediate >> 16); + } else if ((immediate & ~0xff000000) == 0) { + SetEncodingValue(0x6); + SetEncodedImmediate(immediate >> 24); + } else if ((immediate & ~0xff00) == 0xff) { + SetEncodingValue(0xc); + SetEncodedImmediate(immediate >> 8); + } else if ((immediate & ~0xff0000) == 0xffff) { + SetEncodingValue(0xd); + SetEncodedImmediate(immediate >> 16); + } + } + break; + case I64: { + bool is_valid = true; + uint32_t encoding = 0; + if (neon_imm.IsInteger32()) { + uint32_t immediate = neon_imm.GetImmediate(); + uint32_t mask = 0xff000000; + for (uint32_t set_bit = 1 << 3; set_bit != 0; set_bit >>= 1) { 
+ if ((immediate & mask) == mask) { + encoding |= set_bit; + } else if ((immediate & mask) != 0) { + is_valid = false; + break; + } + mask >>= 8; + } + } else { + uint64_t immediate = neon_imm.GetImmediate(); + uint64_t mask = UINT64_C(0xff) << 56; + for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) { + if ((immediate & mask) == mask) { + encoding |= set_bit; + } else if ((immediate & mask) != 0) { + is_valid = false; + break; + } + mask >>= 8; + } + } + if (is_valid) { + SetEncodingValue(0x1e); + SetEncodedImmediate(encoding); + } + break; + } + default: + break; + } + } else { + switch (dt.GetValue()) { + case F32: + if (neon_imm.IsFloat() || neon_imm.IsDouble()) { + ImmediateVFP vfp(neon_imm.GetImmediate()); + if (vfp.IsValid()) { + SetEncodingValue(0xf); + SetEncodedImmediate(vfp.GetEncodingValue()); + } + } + break; + default: + break; + } + } +} + + +DataType ImmediateVmov::DecodeDt(uint32_t cmode) { + switch (cmode & 0xf) { + case 0x0: + case 0x2: + case 0x4: + case 0x6: + case 0xc: + case 0xd: + return I32; + case 0x8: + case 0xa: + return I16; + case 0xe: + return ((cmode & 0x10) == 0) ? 
I8 : I64; + case 0xf: + if ((cmode & 0x10) == 0) return F32; + break; + default: + break; + } + VIXL_UNREACHABLE(); + return kDataTypeValueInvalid; +} + + +NeonImmediate ImmediateVmov::DecodeImmediate(uint32_t cmode, + uint32_t immediate) { + switch (cmode & 0xf) { + case 0x8: + case 0x0: + return immediate; + case 0x2: + case 0xa: + return immediate << 8; + case 0x4: + return immediate << 16; + case 0x6: + return immediate << 24; + case 0xc: + return (immediate << 8) | 0xff; + case 0xd: + return (immediate << 16) | 0xffff; + case 0xe: { + if (cmode == 0x1e) { + uint64_t encoding = 0; + for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) { + encoding <<= 8; + if ((immediate & set_bit) != 0) { + encoding |= 0xff; + } + } + return encoding; + } else { + return immediate; + } + } + case 0xf: { + return ImmediateVFP::Decode(immediate); + } + default: + break; + } + VIXL_UNREACHABLE(); + return 0; +} + + +ImmediateVmvn::ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm) { + if (neon_imm.IsInteger32()) { + uint32_t immediate = neon_imm.GetImmediate(); + switch (dt.GetValue()) { + case I16: + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x8); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0xa); + SetEncodedImmediate(immediate >> 8); + } + break; + case I32: + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x0); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0x2); + SetEncodedImmediate(immediate >> 8); + } else if ((immediate & ~0xff0000) == 0) { + SetEncodingValue(0x4); + SetEncodedImmediate(immediate >> 16); + } else if ((immediate & ~0xff000000) == 0) { + SetEncodingValue(0x6); + SetEncodedImmediate(immediate >> 24); + } else if ((immediate & ~0xff00) == 0xff) { + SetEncodingValue(0xc); + SetEncodedImmediate(immediate >> 8); + } else if ((immediate & ~0xff0000) == 0xffff) { + SetEncodingValue(0xd); + SetEncodedImmediate(immediate >> 16); + } + 
break; + default: + break; + } + } +} + + +DataType ImmediateVmvn::DecodeDt(uint32_t cmode) { + switch (cmode) { + case 0x0: + case 0x2: + case 0x4: + case 0x6: + case 0xc: + case 0xd: + return I32; + case 0x8: + case 0xa: + return I16; + default: + break; + } + VIXL_UNREACHABLE(); + return kDataTypeValueInvalid; +} + + +NeonImmediate ImmediateVmvn::DecodeImmediate(uint32_t cmode, + uint32_t immediate) { + switch (cmode) { + case 0x0: + case 0x8: + return immediate; + case 0x2: + case 0xa: + return immediate << 8; + case 0x4: + return immediate << 16; + case 0x6: + return immediate << 24; + case 0xc: + return (immediate << 8) | 0xff; + case 0xd: + return (immediate << 16) | 0xffff; + default: + break; + } + VIXL_UNREACHABLE(); + return 0; +} + + +ImmediateVorr::ImmediateVorr(DataType dt, const NeonImmediate& neon_imm) { + if (neon_imm.IsInteger32()) { + uint32_t immediate = neon_imm.GetImmediate(); + if (dt.GetValue() == I16) { + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x9); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0xb); + SetEncodedImmediate(immediate >> 8); + } + } else if (dt.GetValue() == I32) { + if ((immediate & ~0xff) == 0) { + SetEncodingValue(0x1); + SetEncodedImmediate(immediate); + } else if ((immediate & ~0xff00) == 0) { + SetEncodingValue(0x3); + SetEncodedImmediate(immediate >> 8); + } else if ((immediate & ~0xff0000) == 0) { + SetEncodingValue(0x5); + SetEncodedImmediate(immediate >> 16); + } else if ((immediate & ~0xff000000) == 0) { + SetEncodingValue(0x7); + SetEncodedImmediate(immediate >> 24); + } + } + } +} + + +DataType ImmediateVorr::DecodeDt(uint32_t cmode) { + switch (cmode) { + case 0x1: + case 0x3: + case 0x5: + case 0x7: + return I32; + case 0x9: + case 0xb: + return I16; + default: + break; + } + VIXL_UNREACHABLE(); + return kDataTypeValueInvalid; +} + + +NeonImmediate ImmediateVorr::DecodeImmediate(uint32_t cmode, + uint32_t immediate) { + switch (cmode) { + case 0x1: + 
case 0x9: + return immediate; + case 0x3: + case 0xb: + return immediate << 8; + case 0x5: + return immediate << 16; + case 0x7: + return immediate << 24; + default: + break; + } + VIXL_UNREACHABLE(); + return 0; +} + +// MemOperand + +std::ostream& operator<<(std::ostream& os, const MemOperand& operand) { + os << "[" << operand.GetBaseRegister(); + if (operand.GetAddrMode() == PostIndex) { + os << "]"; + if (operand.IsRegisterOnly()) return os << "!"; + } + if (operand.IsImmediate()) { + if ((operand.GetOffsetImmediate() != 0) || operand.GetSign().IsMinus() || + ((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) { + if (operand.GetOffsetImmediate() == 0) { + os << ", #" << operand.GetSign() << operand.GetOffsetImmediate(); + } else { + os << ", #" << operand.GetOffsetImmediate(); + } + } + } else if (operand.IsPlainRegister()) { + os << ", " << operand.GetSign() << operand.GetOffsetRegister(); + } else if (operand.IsShiftedRegister()) { + os << ", " << operand.GetSign() << operand.GetOffsetRegister() + << ImmediateShiftOperand(operand.GetShift(), operand.GetShiftAmount()); + } else { + VIXL_UNREACHABLE(); + return os; + } + if (operand.GetAddrMode() == Offset) { + os << "]"; + } else if (operand.GetAddrMode() == PreIndex) { + os << "]!"; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand) { + os << "[" << operand.GetBaseRegister() << operand.GetAlignment() << "]"; + if (operand.GetAddrMode() == PostIndex) { + if (operand.IsPlainRegister()) { + os << ", " << operand.GetOffsetRegister(); + } else { + os << "!"; + } + } + return os; +} + +} // namespace aarch32 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.h new file mode 100644 index 00000000..1d18bfd3 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch32/operands-aarch32.h @@ -0,0 +1,927 @@ +// Copyright 
2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_ +#define VIXL_AARCH32_OPERANDS_AARCH32_H_ + +#include "aarch32/instructions-aarch32.h" + +namespace vixl { +namespace aarch32 { + +// Operand represents generic set of arguments to pass to an instruction. +// +// Usage: , +// +// where is the instruction to use (e.g., Mov(), Rsb(), etc.) 
+// is the destination register +// is the rest of the arguments to the instruction +// +// can be one of: +// +// # - an unsigned 32-bit immediate value +// , <#amount> - immediate shifted register +// , - register shifted register +// +class Operand { + public: + // { # } + // where is uint32_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(uint32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoReg), + shift_(LSL), + amount_(0), + rs_(NoReg) {} + Operand(int32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoReg), + shift_(LSL), + amount_(0), + rs_(NoReg) {} + + // rm + // where rm is the base register + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(Register rm) // NOLINT(runtime/explicit) + : imm_(0), + rm_(rm), + shift_(LSL), + amount_(0), + rs_(NoReg) { + VIXL_ASSERT(rm_.IsValid()); + } + + // rm, + // where rm is the base register, and + // is RRX + Operand(Register rm, Shift shift) + : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(NoReg) { + VIXL_ASSERT(rm_.IsValid()); + VIXL_ASSERT(shift_.IsRRX()); + } + + // rm, # + // where rm is the base register, and + // is one of {LSL, LSR, ASR, ROR}, and + // is uint6_t. 
+ Operand(Register rm, Shift shift, uint32_t amount) + : imm_(0), rm_(rm), shift_(shift), amount_(amount), rs_(NoReg) { + VIXL_ASSERT(rm_.IsValid()); + VIXL_ASSERT(!shift_.IsRRX()); +#ifdef VIXL_DEBUG + switch (shift_.GetType()) { + case LSL: + VIXL_ASSERT(amount_ <= 31); + break; + case ROR: + VIXL_ASSERT(amount_ <= 31); + break; + case LSR: + case ASR: + VIXL_ASSERT(amount_ <= 32); + break; + case RRX: + default: + VIXL_UNREACHABLE(); + break; + } +#endif + } + + // rm, rs + // where rm is the base register, and + // is one of {LSL, LSR, ASR, ROR}, and + // rs is the shifted register + Operand(Register rm, Shift shift, Register rs) + : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(rs) { + VIXL_ASSERT(rm_.IsValid() && rs_.IsValid()); + VIXL_ASSERT(!shift_.IsRRX()); + } + + // Factory methods creating operands from any integral or pointer type. The + // source must fit into 32 bits. + template + static Operand From(T immediate) { +#if __cplusplus >= 201103L + VIXL_STATIC_ASSERT_MESSAGE(std::is_integral::value, + "An integral type is required to build an " + "immediate operand."); +#endif + // Allow both a signed or unsigned 32 bit integer to be passed, but store it + // as a uint32_t. The signedness information will be lost. We have to add a + // static_cast to make sure the compiler does not complain about implicit 64 + // to 32 narrowing. It's perfectly acceptable for the user to pass a 64-bit + // value, as long as it can be encoded in 32 bits. 
+ VIXL_ASSERT(IsInt32(immediate) || IsUint32(immediate)); + return Operand(static_cast(immediate)); + } + + template + static Operand From(T* address) { + uintptr_t address_as_integral = reinterpret_cast(address); + VIXL_ASSERT(IsUint32(address_as_integral)); + return Operand(static_cast(address_as_integral)); + } + + bool IsImmediate() const { return !rm_.IsValid(); } + + bool IsPlainRegister() const { + return rm_.IsValid() && !shift_.IsRRX() && !rs_.IsValid() && (amount_ == 0); + } + + bool IsImmediateShiftedRegister() const { + return rm_.IsValid() && !rs_.IsValid(); + } + + bool IsRegisterShiftedRegister() const { + return rm_.IsValid() && rs_.IsValid(); + } + + uint32_t GetImmediate() const { + VIXL_ASSERT(IsImmediate()); + return imm_; + } + + int32_t GetSignedImmediate() const { + VIXL_ASSERT(IsImmediate()); + int32_t result; + memcpy(&result, &imm_, sizeof(result)); + return result; + } + + Register GetBaseRegister() const { + VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister()); + return rm_; + } + + Shift GetShift() const { + VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister()); + return shift_; + } + + uint32_t GetShiftAmount() const { + VIXL_ASSERT(IsImmediateShiftedRegister()); + return amount_; + } + + Register GetShiftRegister() const { + VIXL_ASSERT(IsRegisterShiftedRegister()); + return rs_; + } + + uint32_t GetTypeEncodingValue() const { + return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue(); + } + + private: +// Forbid implicitely creating operands around types that cannot be encoded +// into a uint32_t without loss. 
+#if __cplusplus >= 201103L + Operand(int64_t) = delete; // NOLINT(runtime/explicit) + Operand(uint64_t) = delete; // NOLINT(runtime/explicit) + Operand(float) = delete; // NOLINT(runtime/explicit) + Operand(double) = delete; // NOLINT(runtime/explicit) +#else + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(int64_t) { // NOLINT(runtime/explicit) + VIXL_UNREACHABLE(); + } + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(uint64_t) { // NOLINT(runtime/explicit) + VIXL_UNREACHABLE(); + } + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(float) { // NOLINT + VIXL_UNREACHABLE(); + } + VIXL_NO_RETURN_IN_DEBUG_MODE Operand(double) { // NOLINT + VIXL_UNREACHABLE(); + } +#endif + + uint32_t imm_; + Register rm_; + Shift shift_; + uint32_t amount_; + Register rs_; +}; + +std::ostream& operator<<(std::ostream& os, const Operand& operand); + +class NeonImmediate { + template + struct DataTypeIdentity { + T data_type_; + }; + + public: + // { # } + // where is 32 bit number. + // This is allowed to be an implicit constructor because NeonImmediate is + // a wrapper class that doesn't normally perform any type conversion. + NeonImmediate(uint32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I32) {} + NeonImmediate(int immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I32) {} + + // { # } + // where is a 64 bit number + // This is allowed to be an implicit constructor because NeonImmediate is + // a wrapper class that doesn't normally perform any type conversion. + NeonImmediate(int64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I64) {} + NeonImmediate(uint64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(I64) {} + + // { # } + // where is a non zero floating point number which can be encoded + // as an 8 bit floating point (checked by the constructor). 
+ // This is allowed to be an implicit constructor because NeonImmediate is + // a wrapper class that doesn't normally perform any type conversion. + NeonImmediate(float immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(F32) {} + NeonImmediate(double immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + immediate_type_(F64) {} + + NeonImmediate(const NeonImmediate& src) + : imm_(src.imm_), immediate_type_(src.immediate_type_) {} + + template + T GetImmediate() const { + return GetImmediate(DataTypeIdentity()); + } + + template + T GetImmediate(const DataTypeIdentity&) const { + VIXL_ASSERT(sizeof(T) <= sizeof(uint32_t)); + VIXL_ASSERT(CanConvert()); + if (immediate_type_.Is(I64)) + return static_cast(imm_.u64_ & static_cast(-1)); + if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0; + return static_cast(imm_.u32_ & static_cast(-1)); + } + + uint64_t GetImmediate(const DataTypeIdentity&) const { + VIXL_ASSERT(CanConvert()); + if (immediate_type_.Is(I32)) return imm_.u32_; + if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0; + return imm_.u64_; + } + float GetImmediate(const DataTypeIdentity&) const { + VIXL_ASSERT(CanConvert()); + if (immediate_type_.Is(F64)) return static_cast(imm_.d_); + return imm_.f_; + } + double GetImmediate(const DataTypeIdentity&) const { + VIXL_ASSERT(CanConvert()); + if (immediate_type_.Is(F32)) return static_cast(imm_.f_); + return imm_.d_; + } + + bool IsInteger32() const { return immediate_type_.Is(I32); } + bool IsInteger64() const { return immediate_type_.Is(I64); } + bool IsInteger() const { return IsInteger32() | IsInteger64(); } + bool IsFloat() const { return immediate_type_.Is(F32); } + bool IsDouble() const { return immediate_type_.Is(F64); } + bool IsFloatZero() const { + if (immediate_type_.Is(F32)) return imm_.f_ == 0.0f; + if (immediate_type_.Is(F64)) return imm_.d_ == 0.0; + return false; + } + + template + bool CanConvert() const { + return 
CanConvert(DataTypeIdentity()); + } + + template + bool CanConvert(const DataTypeIdentity&) const { + VIXL_ASSERT(sizeof(T) < sizeof(uint32_t)); + return (immediate_type_.Is(I32) && ((imm_.u32_ >> (8 * sizeof(T))) == 0)) || + (immediate_type_.Is(I64) && ((imm_.u64_ >> (8 * sizeof(T))) == 0)) || + (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) || + (immediate_type_.Is(F64) && (imm_.d_ == 0.0)); + } + bool CanConvert(const DataTypeIdentity&) const { + return immediate_type_.Is(I32) || + (immediate_type_.Is(I64) && ((imm_.u64_ >> 32) == 0)) || + (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) || + (immediate_type_.Is(F64) && (imm_.d_ == 0.0)); + } + bool CanConvert(const DataTypeIdentity&) const { + return IsInteger() || CanConvert(); + } + bool CanConvert(const DataTypeIdentity&) const { + return IsFloat() || IsDouble(); + } + bool CanConvert(const DataTypeIdentity&) const { + return IsFloat() || IsDouble(); + } + friend std::ostream& operator<<(std::ostream& os, + const NeonImmediate& operand); + + private: + union NeonImmediateType { + uint64_t u64_; + double d_; + uint32_t u32_; + float f_; + NeonImmediateType(uint64_t u) : u64_(u) {} + NeonImmediateType(int64_t u) : u64_(u) {} + NeonImmediateType(uint32_t u) : u32_(u) {} + NeonImmediateType(int32_t u) : u32_(u) {} + NeonImmediateType(double d) : d_(d) {} + NeonImmediateType(float f) : f_(f) {} + NeonImmediateType(const NeonImmediateType& ref) : u64_(ref.u64_) {} + } imm_; + + DataType immediate_type_; +}; + +std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand); + +class NeonOperand { + public: + NeonOperand(int32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(int64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(uint64_t immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + 
NeonOperand(float immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(double immediate) // NOLINT(runtime/explicit) + : imm_(immediate), + rm_(NoDReg) {} + NeonOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : imm_(imm), + rm_(NoDReg) {} + NeonOperand(const VRegister& rm) // NOLINT(runtime/explicit) + : imm_(0), + rm_(rm) { + VIXL_ASSERT(rm_.IsValid()); + } + + bool IsImmediate() const { return !rm_.IsValid(); } + bool IsRegister() const { return rm_.IsValid(); } + bool IsFloatZero() const { + VIXL_ASSERT(IsImmediate()); + return imm_.IsFloatZero(); + } + + const NeonImmediate& GetNeonImmediate() const { return imm_; } + + VRegister GetRegister() const { + VIXL_ASSERT(IsRegister()); + return rm_; + } + + protected: + NeonImmediate imm_; + VRegister rm_; +}; + +std::ostream& operator<<(std::ostream& os, const NeonOperand& operand); + +// SOperand represents either an immediate or a SRegister. +class SOperand : public NeonOperand { + public: + // # + // where is 32bit int + // This is allowed to be an implicit constructor because SOperand is + // a wrapper class that doesn't normally perform any type conversion. + SOperand(int32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + SOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + // # + // where is 32bit float + SOperand(float immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + // where is 64bit float + SOperand(double immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + SOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : NeonOperand(imm) {} + + // rm + // This is allowed to be an implicit constructor because SOperand is + // a wrapper class that doesn't normally perform any type conversion. 
+ SOperand(SRegister rm) // NOLINT(runtime/explicit) + : NeonOperand(rm) {} + SRegister GetRegister() const { + VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kSRegister)); + return SRegister(rm_.GetCode()); + } +}; + +// DOperand represents either an immediate or a DRegister. +std::ostream& operator<<(std::ostream& os, const SOperand& operand); + +class DOperand : public NeonOperand { + public: + // # + // where is uint32_t. + // This is allowed to be an implicit constructor because DOperand is + // a wrapper class that doesn't normally perform any type conversion. + DOperand(int32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(int64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(uint64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + // # + // where is a non zero floating point number which can be encoded + // as an 8 bit floating point (checked by the constructor). + // This is allowed to be an implicit constructor because DOperand is + // a wrapper class that doesn't normally perform any type conversion. + DOperand(float immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + DOperand(double immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + DOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : NeonOperand(imm) {} + // rm + // This is allowed to be an implicit constructor because DOperand is + // a wrapper class that doesn't normally perform any type conversion. + DOperand(DRegister rm) // NOLINT(runtime/explicit) + : NeonOperand(rm) {} + + DRegister GetRegister() const { + VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kDRegister)); + return DRegister(rm_.GetCode()); + } +}; + +std::ostream& operator<<(std::ostream& os, const DOperand& operand); + +// QOperand represents either an immediate or a QRegister. 
+class QOperand : public NeonOperand { + public: + // # + // where is uint32_t. + // This is allowed to be an implicit constructor because QOperand is + // a wrapper class that doesn't normally perform any type conversion. + QOperand(int32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(uint32_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(int64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(uint64_t immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(float immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + QOperand(double immediate) // NOLINT(runtime/explicit) + : NeonOperand(immediate) {} + + QOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit) + : NeonOperand(imm) {} + + // rm + // This is allowed to be an implicit constructor because QOperand is + // a wrapper class that doesn't normally perform any type conversion. + QOperand(QRegister rm) // NOLINT(runtime/explicit) + : NeonOperand(rm) { + VIXL_ASSERT(rm_.IsValid()); + } + + QRegister GetRegister() const { + VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kQRegister)); + return QRegister(rm_.GetCode()); + } +}; + +std::ostream& operator<<(std::ostream& os, const QOperand& operand); + +class ImmediateVFP : public EncodingValue { + template + struct FloatType { + typedef T base_type; + }; + + public: + explicit ImmediateVFP(const NeonImmediate& neon_imm) { + if (neon_imm.IsFloat()) { + const float imm = neon_imm.GetImmediate(); + if (VFP::IsImmFP32(imm)) { + SetEncodingValue(VFP::FP32ToImm8(imm)); + } + } else if (neon_imm.IsDouble()) { + const double imm = neon_imm.GetImmediate(); + if (VFP::IsImmFP64(imm)) { + SetEncodingValue(VFP::FP64ToImm8(imm)); + } + } + } + + template + static T Decode(uint32_t v) { + return Decode(v, FloatType()); + } + + static float Decode(uint32_t imm8, const FloatType&) { + return VFP::Imm8ToFP32(imm8); + } + + 
static double Decode(uint32_t imm8, const FloatType&) { + return VFP::Imm8ToFP64(imm8); + } +}; + + +class ImmediateVbic : public EncodingValueAndImmediate { + public: + ImmediateVbic(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVand : public ImmediateVbic { + public: + ImmediateVand(DataType dt, const NeonImmediate neon_imm) + : ImmediateVbic(dt, neon_imm) { + if (IsValid()) { + SetEncodedImmediate(~GetEncodedImmediate() & 0xff); + } + } +}; + +class ImmediateVmov : public EncodingValueAndImmediate { + public: + ImmediateVmov(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVmvn : public EncodingValueAndImmediate { + public: + ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVorr : public EncodingValueAndImmediate { + public: + ImmediateVorr(DataType dt, const NeonImmediate& neon_imm); + static DataType DecodeDt(uint32_t cmode); + static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate); +}; + +class ImmediateVorn : public ImmediateVorr { + public: + ImmediateVorn(DataType dt, const NeonImmediate& neon_imm) + : ImmediateVorr(dt, neon_imm) { + if (IsValid()) { + SetEncodedImmediate(~GetEncodedImmediate() & 0xff); + } + } +}; + +// MemOperand represents the addressing mode of a load or store instruction. +// +// Usage: , +// +// where is the instruction to use (e.g., Ldr(), Str(), etc.), +// is general purpose register to be transferred, +// is the rest of the arguments to the instruction +// +// can be in one of 3 addressing modes: +// +// [ , ] == offset addressing +// [ , ]! 
== pre-indexed addressing +// [ ], == post-indexed addressing +// +// where can be one of: +// - an immediate constant, such as , +// - an index register +// - a shifted index register , # +// +// The index register may have an associated {+/-} sign, +// which if ommitted, defaults to + . +// +// We have two constructors for the offset: +// +// One with a signed value offset parameter. The value of sign_ is +// "sign_of(constructor's offset parameter) and the value of offset_ is +// "constructor's offset parameter". +// +// The other with a sign and a positive value offset parameters. The value of +// sign_ is "constructor's sign parameter" and the value of offset_ is +// "constructor's sign parameter * constructor's offset parameter". +// +// The value of offset_ reflects the effective offset. For an offset_ of 0, +// sign_ can be positive or negative. Otherwise, sign_ always agrees with +// the sign of offset_. +class MemOperand { + public: + // rn + // where rn is the general purpose base register only + explicit MemOperand(Register rn, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(NoReg), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode | kMemOperandRegisterOnly) { + VIXL_ASSERT(rn_.IsValid()); + } + + // rn, # + // where rn is the general purpose base register, + // is a 32-bit offset to add to rn + // + // Note: if rn is PC, then this form is equivalent to a "label" + // Note: the second constructor allow minus zero (-0). + MemOperand(Register rn, int32_t offset, AddrMode addrmode = Offset) + : rn_(rn), + offset_(offset), + sign_((offset < 0) ? minus : plus), + rm_(NoReg), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid()); + } + MemOperand(Register rn, Sign sign, int32_t offset, AddrMode addrmode = Offset) + : rn_(rn), + offset_(sign.IsPlus() ? 
offset : -offset), + sign_(sign), + rm_(NoReg), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid()); + // With this constructor, the sign must only be specified by "sign". + VIXL_ASSERT(offset >= 0); + } + + // rn, {+/-}rm + // where rn is the general purpose base register, + // {+/-} is the sign of the index register, + // rm is the general purpose index register, + MemOperand(Register rn, Sign sign, Register rm, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(sign), + rm_(rm), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + } + + // rn, rm + // where rn is the general purpose base register, + // rm is the general purpose index register, + MemOperand(Register rn, Register rm, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(rm), + shift_(LSL), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + } + + // rn, {+/-}rm, + // where rn is the general purpose base register, + // {+/-} is the sign of the index register, + // rm is the general purpose index register, + // is RRX, applied to value from rm + MemOperand(Register rn, + Sign sign, + Register rm, + Shift shift, + AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(sign), + rm_(rm), + shift_(shift), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + VIXL_ASSERT(shift_.IsRRX()); + } + + // rn, rm, + // where rn is the general purpose base register, + // rm is the general purpose index register, + // is RRX, applied to value from rm + MemOperand(Register rn, Register rm, Shift shift, AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(rm), + shift_(shift), + shift_amount_(0), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + VIXL_ASSERT(shift_.IsRRX()); + } + + // rn, {+/-}rm, # + // where rn is the general purpose base register, + // {+/-} 
is the sign of the index register, + // rm is the general purpose index register, + // is one of {LSL, LSR, ASR, ROR}, applied to value from rm + // is optional size to apply to value from rm + MemOperand(Register rn, + Sign sign, + Register rm, + Shift shift, + uint32_t shift_amount, + AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(sign), + rm_(rm), + shift_(shift), + shift_amount_(shift_amount), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + CheckShift(); + } + + // rn, rm, # + // where rn is the general purpose base register, + // rm is the general purpose index register, + // is one of {LSL, LSR, ASR, ROR}, applied to value from rm + // is optional size to apply to value from rm + MemOperand(Register rn, + Register rm, + Shift shift, + uint32_t shift_amount, + AddrMode addrmode = Offset) + : rn_(rn), + offset_(0), + sign_(plus), + rm_(rm), + shift_(shift), + shift_amount_(shift_amount), + addrmode_(addrmode) { + VIXL_ASSERT(rn_.IsValid() && rm_.IsValid()); + CheckShift(); + } + + Register GetBaseRegister() const { return rn_; } + int32_t GetOffsetImmediate() const { return offset_; } + bool IsOffsetImmediateWithinRange(int min, + int max, + int multiple_of = 1) const { + return (offset_ >= min) && (offset_ <= max) && + ((offset_ % multiple_of) == 0); + } + Sign GetSign() const { return sign_; } + Register GetOffsetRegister() const { return rm_; } + Shift GetShift() const { return shift_; } + unsigned GetShiftAmount() const { return shift_amount_; } + AddrMode GetAddrMode() const { + return static_cast(addrmode_ & kMemOperandAddrModeMask); + } + bool IsRegisterOnly() const { + return (addrmode_ & kMemOperandRegisterOnly) != 0; + } + + bool IsImmediate() const { return !rm_.IsValid(); } + bool IsImmediateZero() const { return !rm_.IsValid() && (offset_ == 0); } + bool IsPlainRegister() const { + return rm_.IsValid() && shift_.IsLSL() && (shift_amount_ == 0); + } + bool IsShiftedRegister() const { return rm_.IsValid(); } + 
bool IsImmediateOffset() const { + return (GetAddrMode() == Offset) && !rm_.IsValid(); + } + bool IsImmediateZeroOffset() const { + return (GetAddrMode() == Offset) && !rm_.IsValid() && (offset_ == 0); + } + bool IsRegisterOffset() const { + return (GetAddrMode() == Offset) && rm_.IsValid() && shift_.IsLSL() && + (shift_amount_ == 0); + } + bool IsShiftedRegisterOffset() const { + return (GetAddrMode() == Offset) && rm_.IsValid(); + } + uint32_t GetTypeEncodingValue() const { + return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue(); + } + bool IsOffset() const { return GetAddrMode() == Offset; } + bool IsPreIndex() const { return GetAddrMode() == PreIndex; } + bool IsPostIndex() const { return GetAddrMode() == PostIndex; } + bool IsShiftValid() const { return shift_.IsValidAmount(shift_amount_); } + + private: + static const int kMemOperandRegisterOnly = 0x1000; + static const int kMemOperandAddrModeMask = 0xfff; + void CheckShift() { +#ifdef VIXL_DEBUG + // Disallow any zero shift other than RRX #0 and LSL #0 . 
+ if ((shift_amount_ == 0) && shift_.IsRRX()) return; + if ((shift_amount_ == 0) && !shift_.IsLSL()) { + VIXL_ABORT_WITH_MSG( + "A shift by 0 is only accepted in " + "the case of lsl and will be treated as " + "no shift.\n"); + } + switch (shift_.GetType()) { + case LSL: + VIXL_ASSERT(shift_amount_ <= 31); + break; + case ROR: + VIXL_ASSERT(shift_amount_ <= 31); + break; + case LSR: + case ASR: + VIXL_ASSERT(shift_amount_ <= 32); + break; + case RRX: + default: + VIXL_UNREACHABLE(); + break; + } +#endif + } + Register rn_; + int32_t offset_; + Sign sign_; + Register rm_; + Shift shift_; + uint32_t shift_amount_; + uint32_t addrmode_; +}; + +std::ostream& operator<<(std::ostream& os, const MemOperand& operand); + +class AlignedMemOperand : public MemOperand { + public: + AlignedMemOperand(Register rn, Alignment align, AddrMode addrmode = Offset) + : MemOperand(rn, addrmode), align_(align) { + VIXL_ASSERT(addrmode != PreIndex); + } + + AlignedMemOperand(Register rn, + Alignment align, + Register rm, + AddrMode addrmode) + : MemOperand(rn, rm, addrmode), align_(align) { + VIXL_ASSERT(addrmode != PreIndex); + } + + Alignment GetAlignment() const { return align_; } + + private: + Alignment align_; +}; + +std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand); + +} // namespace aarch32 +} // namespace vixl + +#endif // VIXL_AARCH32_OPERANDS_AARCH32_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/abi-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/abi-aarch64.h new file mode 100644 index 00000000..a0058024 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/abi-aarch64.h @@ -0,0 +1,167 @@ +// Copyright 2016, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The ABI features are only supported with C++11 or later. +#if __cplusplus >= 201103L +// This should not be defined manually. +#define VIXL_HAS_ABI_SUPPORT +#elif defined(VIXL_HAS_ABI_SUPPORT) +#error "The ABI support requires C++11 or later." 
+#endif + +#ifdef VIXL_HAS_ABI_SUPPORT + +#ifndef VIXL_AARCH64_ABI_AARCH64_H_ +#define VIXL_AARCH64_ABI_AARCH64_H_ + +#include +#include + +#include "../globals-vixl.h" + +#include "instructions-aarch64.h" +#include "operands-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Class describing the AArch64 procedure call standard, as defined in "ARM +// Procedure Call Standard for the ARM 64-bit Architecture (AArch64)", +// release 1.0 (AAPCS below). +// +// The stages in the comments match the description in that document. +// +// Stage B does not apply to arguments handled by this class. +class ABI { + public: + explicit ABI(Register stack_pointer = sp) : stack_pointer_(stack_pointer) { + // Stage A - Initialization + Reset(); + } + + void Reset() { + NGRN_ = 0; + NSRN_ = 0; + stack_offset_ = 0; + } + + int GetStackSpaceRequired() { return stack_offset_; } + + // The logic is described in section 5.5 of the AAPCS. + template + GenericOperand GetReturnGenericOperand() const { + ABI abi(stack_pointer_); + GenericOperand result = abi.GetNextParameterGenericOperand(); + VIXL_ASSERT(result.IsCPURegister()); + return result; + } + + // The logic is described in section 5.4.2 of the AAPCS. + // The `GenericOperand` returned describes the location reserved for the + // argument from the point of view of the callee. + template + GenericOperand GetNextParameterGenericOperand() { + const bool is_floating_point_type = std::is_floating_point::value; + const bool is_integral_type = + std::is_integral::value || std::is_enum::value; + const bool is_pointer_type = std::is_pointer::value; + int type_alignment = std::alignment_of::value; + + // We only support basic types. + VIXL_ASSERT(is_floating_point_type || is_integral_type || is_pointer_type); + + // To ensure we get the correct type of operand when simulating on a 32-bit + // host, force the size of pointer types to the native AArch64 pointer size. + unsigned size = is_pointer_type ? 
8 : sizeof(T); + // The size of the 'operand' reserved for the argument. + unsigned operand_size = AlignUp(size, kWRegSizeInBytes); + if (size > 8) { + VIXL_UNIMPLEMENTED(); + return GenericOperand(); + } + + // Stage C.1 + if (is_floating_point_type && (NSRN_ < 8)) { + return GenericOperand(FPRegister(NSRN_++, size * kBitsPerByte)); + } + // Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above. + // Stages C.5 and C.6 + if (is_floating_point_type) { + VIXL_STATIC_ASSERT( + !is_floating_point_type || + (std::is_same::value || std::is_same::value)); + int offset = stack_offset_; + stack_offset_ += 8; + return GenericOperand(MemOperand(stack_pointer_, offset), operand_size); + } + // Stage C.7 + if ((is_integral_type || is_pointer_type) && (size <= 8) && (NGRN_ < 8)) { + return GenericOperand(Register(NGRN_++, operand_size * kBitsPerByte)); + } + // Stage C.8 + if (type_alignment == 16) { + NGRN_ = AlignUp(NGRN_, 2); + } + // Stage C.9 + if (is_integral_type && (size == 16) && (NGRN_ < 7)) { + VIXL_UNIMPLEMENTED(); + return GenericOperand(); + } + // Stage C.10: Unsupported. Caught by the assertions above. + // Stage C.11 + NGRN_ = 8; + // Stage C.12 + stack_offset_ = AlignUp(stack_offset_, std::max(type_alignment, 8)); + // Stage C.13: Unsupported. Caught by the assertions above. + // Stage C.14 + VIXL_ASSERT(size <= 8u); + size = std::max(size, 8u); + int offset = stack_offset_; + stack_offset_ += size; + return GenericOperand(MemOperand(stack_pointer_, offset), operand_size); + } + + private: + Register stack_pointer_; + // Next General-purpose Register Number. + int NGRN_; + // Next SIMD and Floating-point Register Number. + int NSRN_; + // The acronym "NSAA" used in the standard refers to the "Next Stacked + // Argument Address". Here we deal with offsets from the stack pointer. 
+ int stack_offset_; +}; + +template <> +inline GenericOperand ABI::GetReturnGenericOperand() const { + return GenericOperand(); +} +} +} // namespace vixl::aarch64 + +#endif // VIXL_AARCH64_ABI_AARCH64_H_ + +#endif // VIXL_HAS_ABI_SUPPORT diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.cc new file mode 100644 index 00000000..1c3ea65b --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.cc @@ -0,0 +1,6295 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#include + +#include "assembler-aarch64.h" +#include "macro-assembler-aarch64.h" + +namespace vixl { +namespace aarch64 { + +RawLiteral::RawLiteral(size_t size, + LiteralPool* literal_pool, + DeletionPolicy deletion_policy) + : size_(size), + offset_(0), + low64_(0), + high64_(0), + literal_pool_(literal_pool), + deletion_policy_(deletion_policy) { + VIXL_ASSERT((deletion_policy == kManuallyDeleted) || (literal_pool_ != NULL)); + if (deletion_policy == kDeletedOnPoolDestruction) { + literal_pool_->DeleteOnDestruction(this); + } +} + + +void Assembler::Reset() { GetBuffer()->Reset(); } + + +void Assembler::bind(Label* label) { + BindToOffset(label, GetBuffer()->GetCursorOffset()); +} + + +void Assembler::BindToOffset(Label* label, ptrdiff_t offset) { + VIXL_ASSERT((offset >= 0) && (offset <= GetBuffer()->GetCursorOffset())); + VIXL_ASSERT(offset % kInstructionSize == 0); + + label->Bind(offset); + + for (Label::LabelLinksIterator it(label); !it.Done(); it.Advance()) { + Instruction* link = + GetBuffer()->GetOffsetAddress(*it.Current()); + link->SetImmPCOffsetTarget(GetLabelAddress(label)); + } + label->ClearAllLinks(); +} + + +// A common implementation for the LinkAndGetOffsetTo helpers. +// +// The offset is calculated by aligning the PC and label addresses down to a +// multiple of 1 << element_shift, then calculating the (scaled) offset between +// them. This matches the semantics of adrp, for example. 
+template +ptrdiff_t Assembler::LinkAndGetOffsetTo(Label* label) { + VIXL_STATIC_ASSERT(element_shift < (sizeof(ptrdiff_t) * 8)); + + if (label->IsBound()) { + uintptr_t pc_offset = GetCursorAddress() >> element_shift; + uintptr_t label_offset = GetLabelAddress(label) >> element_shift; + return label_offset - pc_offset; + } else { + label->AddLink(GetBuffer()->GetCursorOffset()); + return 0; + } +} + + +ptrdiff_t Assembler::LinkAndGetByteOffsetTo(Label* label) { + return LinkAndGetOffsetTo<0>(label); +} + + +ptrdiff_t Assembler::LinkAndGetInstructionOffsetTo(Label* label) { + return LinkAndGetOffsetTo(label); +} + + +ptrdiff_t Assembler::LinkAndGetPageOffsetTo(Label* label) { + return LinkAndGetOffsetTo(label); +} + + +void Assembler::place(RawLiteral* literal) { + VIXL_ASSERT(!literal->IsPlaced()); + + // Patch instructions using this literal. + if (literal->IsUsed()) { + Instruction* target = GetCursorAddress(); + ptrdiff_t offset = literal->GetLastUse(); + bool done; + do { + Instruction* ldr = GetBuffer()->GetOffsetAddress(offset); + VIXL_ASSERT(ldr->IsLoadLiteral()); + + ptrdiff_t imm19 = ldr->GetImmLLiteral(); + VIXL_ASSERT(imm19 <= 0); + done = (imm19 == 0); + offset += imm19 * kLiteralEntrySize; + + ldr->SetImmLLiteral(target); + } while (!done); + } + + // "bind" the literal. + literal->SetOffset(GetCursorOffset()); + // Copy the data into the pool. 
+ switch (literal->GetSize()) { + case kSRegSizeInBytes: + dc32(literal->GetRawValue32()); + break; + case kDRegSizeInBytes: + dc64(literal->GetRawValue64()); + break; + default: + VIXL_ASSERT(literal->GetSize() == kQRegSizeInBytes); + dc64(literal->GetRawValue128Low64()); + dc64(literal->GetRawValue128High64()); + } + + literal->literal_pool_ = NULL; +} + + +ptrdiff_t Assembler::LinkAndGetWordOffsetTo(RawLiteral* literal) { + VIXL_ASSERT(IsWordAligned(GetCursorOffset())); + + bool register_first_use = + (literal->GetLiteralPool() != NULL) && !literal->IsUsed(); + + if (literal->IsPlaced()) { + // The literal is "behind", the offset will be negative. + VIXL_ASSERT((literal->GetOffset() - GetCursorOffset()) <= 0); + return (literal->GetOffset() - GetCursorOffset()) >> kLiteralEntrySizeLog2; + } + + ptrdiff_t offset = 0; + // Link all uses together. + if (literal->IsUsed()) { + offset = + (literal->GetLastUse() - GetCursorOffset()) >> kLiteralEntrySizeLog2; + } + literal->SetLastUse(GetCursorOffset()); + + if (register_first_use) { + literal->GetLiteralPool()->AddEntry(literal); + } + + return offset; +} + + +// Code generation. 
+void Assembler::br(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(BR | Rn(xn)); +} + + +void Assembler::blr(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(BLR | Rn(xn)); +} + + +void Assembler::ret(const Register& xn) { + VIXL_ASSERT(xn.Is64Bits()); + Emit(RET | Rn(xn)); +} + + +void Assembler::braaz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BRAAZ | Rn(xn) | Rd_mask); +} + +void Assembler::brabz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BRABZ | Rn(xn) | Rd_mask); +} + +void Assembler::blraaz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BLRAAZ | Rn(xn) | Rd_mask); +} + +void Assembler::blrabz(const Register& xn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(BLRABZ | Rn(xn) | Rd_mask); +} + +void Assembler::retaa() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(RETAA | Rn_mask | Rd_mask); +} + +void Assembler::retab() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(RETAB | Rn_mask | Rd_mask); +} + +// The Arm ARM names the register Xm but encodes it in the Xd bitfield. 
+void Assembler::braa(const Register& xn, const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits()); + Emit(BRAA | Rn(xn) | RdSP(xm)); +} + +void Assembler::brab(const Register& xn, const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits()); + Emit(BRAB | Rn(xn) | RdSP(xm)); +} + +void Assembler::blraa(const Register& xn, const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits()); + Emit(BLRAA | Rn(xn) | RdSP(xm)); +} + +void Assembler::blrab(const Register& xn, const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xn.Is64Bits() && xm.Is64Bits()); + Emit(BLRAB | Rn(xn) | RdSP(xm)); +} + + +void Assembler::b(int64_t imm26) { Emit(B | ImmUncondBranch(imm26)); } + + +void Assembler::b(int64_t imm19, Condition cond) { + Emit(B_cond | ImmCondBranch(imm19) | cond); +} + + +void Assembler::b(Label* label) { + int64_t offset = LinkAndGetInstructionOffsetTo(label); + VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset)); + b(static_cast(offset)); +} + + +void Assembler::b(Label* label, Condition cond) { + int64_t offset = LinkAndGetInstructionOffsetTo(label); + VIXL_ASSERT(Instruction::IsValidImmPCOffset(CondBranchType, offset)); + b(static_cast(offset), cond); +} + + +void Assembler::bl(int64_t imm26) { Emit(BL | ImmUncondBranch(imm26)); } + + +void Assembler::bl(Label* label) { + int64_t offset = LinkAndGetInstructionOffsetTo(label); + VIXL_ASSERT(Instruction::IsValidImmPCOffset(UncondBranchType, offset)); + bl(static_cast(offset)); +} + + +void Assembler::cbz(const Register& rt, int64_t imm19) { + Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt)); +} + + +void Assembler::cbz(const Register& rt, Label* label) { + int64_t offset = LinkAndGetInstructionOffsetTo(label); + VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset)); + cbz(rt, static_cast(offset)); 
+} + + +void Assembler::cbnz(const Register& rt, int64_t imm19) { + Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt)); +} + + +void Assembler::cbnz(const Register& rt, Label* label) { + int64_t offset = LinkAndGetInstructionOffsetTo(label); + VIXL_ASSERT(Instruction::IsValidImmPCOffset(CompareBranchType, offset)); + cbnz(rt, static_cast(offset)); +} + + +void Assembler::NEONTable(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONTableOp op) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.Is16B() || vd.Is8B()); + VIXL_ASSERT(vn.Is16B()); + VIXL_ASSERT(AreSameFormat(vd, vm)); + Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONTable(vd, vn, vm, NEON_TBL_1v); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + USE(vn2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vn, vn2)); + VIXL_ASSERT(AreConsecutive(vn, vn2)); + NEONTable(vd, vn, vm, NEON_TBL_2v); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + USE(vn2, vn3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3)); + VIXL_ASSERT(AreConsecutive(vn, vn2, vn3)); + NEONTable(vd, vn, vm, NEON_TBL_3v); +} + + +void Assembler::tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + USE(vn2, vn3, vn4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4)); + VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4)); + NEONTable(vd, vn, vm, NEON_TBL_4v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + 
NEONTable(vd, vn, vm, NEON_TBX_1v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + USE(vn2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vn, vn2)); + VIXL_ASSERT(AreConsecutive(vn, vn2)); + NEONTable(vd, vn, vm, NEON_TBX_2v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + USE(vn2, vn3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3)); + VIXL_ASSERT(AreConsecutive(vn, vn2, vn3)); + NEONTable(vd, vn, vm, NEON_TBX_3v); +} + + +void Assembler::tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + USE(vn2, vn3, vn4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4)); + VIXL_ASSERT(AreConsecutive(vn, vn2, vn3, vn4)); + NEONTable(vd, vn, vm, NEON_TBX_4v); +} + + +void Assembler::tbz(const Register& rt, unsigned bit_pos, int64_t imm14) { + VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); + Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); +} + + +void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) { + ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label); + VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset)); + tbz(rt, bit_pos, static_cast(offset)); +} + + +void Assembler::tbnz(const Register& rt, unsigned bit_pos, int64_t imm14) { + VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); + Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); +} + + +void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) { + ptrdiff_t offset = LinkAndGetInstructionOffsetTo(label); + VIXL_ASSERT(Instruction::IsValidImmPCOffset(TestBranchType, offset)); + tbnz(rt, bit_pos, 
static_cast(offset)); +} + + +void Assembler::adr(const Register& xd, int64_t imm21) { + VIXL_ASSERT(xd.Is64Bits()); + Emit(ADR | ImmPCRelAddress(imm21) | Rd(xd)); +} + + +void Assembler::adr(const Register& xd, Label* label) { + adr(xd, static_cast(LinkAndGetByteOffsetTo(label))); +} + + +void Assembler::adrp(const Register& xd, int64_t imm21) { + VIXL_ASSERT(xd.Is64Bits()); + Emit(ADRP | ImmPCRelAddress(imm21) | Rd(xd)); +} + + +void Assembler::adrp(const Register& xd, Label* label) { + VIXL_ASSERT(AllowPageOffsetDependentCode()); + adrp(xd, static_cast(LinkAndGetPageOffsetTo(label))); +} + + +void Assembler::add(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, LeaveFlags, ADD); +} + + +void Assembler::adds(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, SetFlags, ADD); +} + + +void Assembler::cmn(const Register& rn, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rn); + adds(zr, rn, operand); +} + + +void Assembler::sub(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, LeaveFlags, SUB); +} + + +void Assembler::subs(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSub(rd, rn, operand, SetFlags, SUB); +} + + +void Assembler::cmp(const Register& rn, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rn); + subs(zr, rn, operand); +} + + +void Assembler::neg(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + sub(rd, zr, operand); +} + + +void Assembler::negs(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + subs(rd, zr, operand); +} + + +void Assembler::adc(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC); +} + + +void Assembler::adcs(const Register& rd, + const Register& rn, + const Operand& operand) { + 
AddSubWithCarry(rd, rn, operand, SetFlags, ADC); +} + + +void Assembler::sbc(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC); +} + + +void Assembler::sbcs(const Register& rd, + const Register& rn, + const Operand& operand) { + AddSubWithCarry(rd, rn, operand, SetFlags, SBC); +} + + +void Assembler::rmif(const Register& xn, unsigned rotation, StatusFlags flags) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM)); + VIXL_ASSERT(xn.Is64Bits()); + Emit(RMIF | Rn(xn) | ImmRMIFRotation(rotation) | Nzcv(flags)); +} + + +void Assembler::setf8(const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM)); + Emit(SETF8 | Rn(rn)); +} + + +void Assembler::setf16(const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM)); + Emit(SETF16 | Rn(rn)); +} + + +void Assembler::ngc(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + sbc(rd, zr, operand); +} + + +void Assembler::ngcs(const Register& rd, const Operand& operand) { + Register zr = AppropriateZeroRegFor(rd); + sbcs(rd, zr, operand); +} + + +// Logical instructions. 
+void Assembler::and_(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, AND); +} + + +void Assembler::ands(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ANDS); +} + + +void Assembler::tst(const Register& rn, const Operand& operand) { + ands(AppropriateZeroRegFor(rn), rn, operand); +} + + +void Assembler::bic(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, BIC); +} + + +void Assembler::bics(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, BICS); +} + + +void Assembler::orr(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ORR); +} + + +void Assembler::orn(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, ORN); +} + + +void Assembler::eor(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, EOR); +} + + +void Assembler::eon(const Register& rd, + const Register& rn, + const Operand& operand) { + Logical(rd, rn, operand, EON); +} + + +void Assembler::lslv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::lsrv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::asrv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::rorv(const Register& rd, + 
const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +// Bitfield operations. +void Assembler::bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | BFM | N | ImmR(immr, rd.GetSizeInBits()) | + ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | SBFM | N | ImmR(immr, rd.GetSizeInBits()) | + ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | UBFM | N | ImmR(immr, rd.GetSizeInBits()) | + ImmS(imms, rn.GetSizeInBits()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); + Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.GetSizeInBits()) | Rn(rn) | + Rd(rd)); +} + + +void Assembler::csel(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSEL); +} + + +void Assembler::csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSINC); +} + + +void Assembler::csinv(const Register& rd, + const Register& rn, + const Register& rm, + 
Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSINV); +} + + +void Assembler::csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + ConditionalSelect(rd, rn, rm, cond, CSNEG); +} + + +void Assembler::cset(const Register& rd, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + Register zr = AppropriateZeroRegFor(rd); + csinc(rd, zr, zr, InvertCondition(cond)); +} + + +void Assembler::csetm(const Register& rd, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + Register zr = AppropriateZeroRegFor(rd); + csinv(rd, zr, zr, InvertCondition(cond)); +} + + +void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csinc(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csinv(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + csneg(rd, rn, rn, InvertCondition(cond)); +} + + +void Assembler::ConditionalSelect(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond, + ConditionalSelectOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); +} + + +void Assembler::ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + ConditionalCompare(rn, operand, nzcv, cond, CCMN); +} + + +void Assembler::ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + ConditionalCompare(rn, operand, nzcv, cond, CCMP); +} + + +void Assembler::DataProcessing3Source(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra, + DataProcessing3SourceOp op) { + Emit(SF(rd) | op | 
Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd)); +} + + +void Assembler::crc32b(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32B | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32h(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32H | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32w(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32W | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32x(const Register& wd, + const Register& wn, + const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits()); + Emit(SF(xm) | Rm(xm) | CRC32X | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32cb(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32CB | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32ch(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32CH | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32cw(const Register& wd, + const Register& wn, + const Register& wm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && wm.Is32Bits()); + Emit(SF(wm) | Rm(wm) | CRC32CW | Rn(wn) | Rd(wd)); +} + + +void Assembler::crc32cx(const Register& wd, + const Register& wn, + const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kCRC32)); + 
VIXL_ASSERT(wd.Is32Bits() && wn.Is32Bits() && xm.Is64Bits()); + Emit(SF(xm) | Rm(xm) | CRC32CX | Rn(wn) | Rd(wd)); +} + + +void Assembler::mul(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); + DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD); +} + + +void Assembler::madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + DataProcessing3Source(rd, rn, rm, ra, MADD); +} + + +void Assembler::mneg(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm)); + DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB); +} + + +void Assembler::msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + DataProcessing3Source(rd, rn, rm, ra, MSUB); +} + + +void Assembler::umaddl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xa, UMADDL_x); +} + + +void Assembler::smaddl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xa, SMADDL_x); +} + + +void Assembler::umsubl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xa, UMSUBL_x); +} + + +void Assembler::smsubl(const Register& xd, + const Register& wn, + const Register& wm, + const Register& xa) { + VIXL_ASSERT(xd.Is64Bits() && xa.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xa, SMSUBL_x); +} + + +void Assembler::smull(const Register& xd, + const Register& wn, + 
const Register& wm) { + VIXL_ASSERT(xd.Is64Bits()); + VIXL_ASSERT(wn.Is32Bits() && wm.Is32Bits()); + DataProcessing3Source(xd, wn, wm, xzr, SMADDL_x); +} + + +void Assembler::sdiv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::smulh(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + DataProcessing3Source(xd, xn, xm, xzr, SMULH_x); +} + + +void Assembler::umulh(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + DataProcessing3Source(xd, xn, xm, xzr, UMULH_x); +} + + +void Assembler::udiv(const Register& rd, + const Register& rn, + const Register& rm) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == rm.GetSizeInBits()); + Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); +} + + +void Assembler::rbit(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, RBIT); +} + + +void Assembler::rev16(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, REV16); +} + + +void Assembler::rev32(const Register& xd, const Register& xn) { + VIXL_ASSERT(xd.Is64Bits()); + DataProcessing1Source(xd, xn, REV); +} + + +void Assembler::rev(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, rd.Is64Bits() ? 
REV_x : REV_w); +} + + +void Assembler::clz(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, CLZ); +} + + +void Assembler::cls(const Register& rd, const Register& rn) { + DataProcessing1Source(rd, rn, CLS); +} + +#define PAUTH_VARIATIONS(V) \ + V(paci, PACI) \ + V(pacd, PACD) \ + V(auti, AUTI) \ + V(autd, AUTD) + +#define DEFINE_ASM_FUNCS(PRE, OP) \ + void Assembler::PRE##a(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits()); \ + Emit(SF(xd) | OP##A | Rd(xd) | RnSP(xn)); \ + } \ + \ + void Assembler::PRE##za(const Register& xd) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits()); \ + Emit(SF(xd) | OP##ZA | Rd(xd)); \ + } \ + \ + void Assembler::PRE##b(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits()); \ + Emit(SF(xd) | OP##B | Rd(xd) | RnSP(xn)); \ + } \ + \ + void Assembler::PRE##zb(const Register& xd) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); \ + VIXL_ASSERT(xd.Is64Bits()); \ + Emit(SF(xd) | OP##ZB | Rd(xd)); \ + } + +PAUTH_VARIATIONS(DEFINE_ASM_FUNCS) +#undef DEFINE_ASM_FUNCS + +void Assembler::pacga(const Register& xd, + const Register& xn, + const Register& xm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric)); + VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits()); + Emit(SF(xd) | PACGA | Rd(xd) | Rn(xn) | RmSP(xm)); +} + +void Assembler::xpaci(const Register& xd) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xd.Is64Bits()); + Emit(SF(xd) | XPACI | Rd(xd)); +} + +void Assembler::xpacd(const Register& xd) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + VIXL_ASSERT(xd.Is64Bits()); + Emit(SF(xd) | XPACD | Rd(xd)); +} + + +void Assembler::ldp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2)); +} + + +void 
Assembler::stp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2)); +} + + +void Assembler::ldpsw(const Register& xt, + const Register& xt2, + const MemOperand& src) { + VIXL_ASSERT(xt.Is64Bits() && xt2.Is64Bits()); + LoadStorePair(xt, xt2, src, LDPSW_x); +} + + +void Assembler::LoadStorePair(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op) { + VIXL_ASSERT(CPUHas(rt, rt2)); + + // 'rt' and 'rt2' can only be aliased for stores. + VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), CalcLSPairDataSize(op))); + + int offset = static_cast(addr.GetOffset()); + Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) | + ImmLSPair(offset, CalcLSPairDataSize(op)); + + Instr addrmodeop; + if (addr.IsImmediateOffset()) { + addrmodeop = LoadStorePairOffsetFixed; + } else { + if (addr.IsPreIndex()) { + addrmodeop = LoadStorePairPreIndexFixed; + } else { + VIXL_ASSERT(addr.IsPostIndex()); + addrmodeop = LoadStorePairPostIndexFixed; + } + } + Emit(addrmodeop | memop); +} + + +void Assembler::ldnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + LoadStorePairNonTemporal(rt, rt2, src, LoadPairNonTemporalOpFor(rt, rt2)); +} + + +void Assembler::stnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + LoadStorePairNonTemporal(rt, rt2, dst, StorePairNonTemporalOpFor(rt, rt2)); +} + + +void Assembler::LoadStorePairNonTemporal(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairNonTemporalOp op) { + VIXL_ASSERT(CPUHas(rt, rt2)); + + VIXL_ASSERT(!rt.Is(rt2)); + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + VIXL_ASSERT(addr.IsImmediateOffset()); + + unsigned size = + CalcLSPairDataSize(static_cast(op & LoadStorePairMask)); + VIXL_ASSERT(IsImmLSPair(addr.GetOffset(), 
size)); + int offset = static_cast(addr.GetOffset()); + Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.GetBaseRegister()) | + ImmLSPair(offset, size)); +} + + +// Memory instructions. +void Assembler::ldrb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LDRB_w, option); +} + + +void Assembler::strb(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, dst, STRB_w, option); +} + + +void Assembler::ldrsb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option); +} + + +void Assembler::ldrh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LDRH_w, option); +} + + +void Assembler::strh(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, dst, STRH_w, option); +} + + +void Assembler::ldrsh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? 
LDRSH_x : LDRSH_w, option); +} + + +void Assembler::ldr(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, src, LoadOpFor(rt), option); +} + + +void Assembler::str(const CPURegister& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(rt, dst, StoreOpFor(rt), option); +} + + +void Assembler::ldrsw(const Register& xt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(xt.Is64Bits()); + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + LoadStore(xt, src, LDRSW_x, option); +} + + +void Assembler::ldurb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LDRB_w, option); +} + + +void Assembler::sturb(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, STRB_w, option); +} + + +void Assembler::ldursb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? 
LDRSB_x : LDRSB_w, option); +} + + +void Assembler::ldurh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LDRH_w, option); +} + + +void Assembler::sturh(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, STRH_w, option); +} + + +void Assembler::ldursh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option); +} + + +void Assembler::ldur(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, src, LoadOpFor(rt), option); +} + + +void Assembler::stur(const CPURegister& rt, + const MemOperand& dst, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(rt, dst, StoreOpFor(rt), option); +} + + +void Assembler::ldursw(const Register& xt, + const MemOperand& src, + LoadStoreScalingOption option) { + VIXL_ASSERT(xt.Is64Bits()); + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + LoadStore(xt, src, LDRSW_x, option); +} + + +void Assembler::ldraa(const Register& xt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + LoadStorePAC(xt, src, LDRAA); +} + + +void Assembler::ldrab(const Register& xt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + LoadStorePAC(xt, src, LDRAB); +} + + +void Assembler::ldrsw(const Register& xt, RawLiteral* literal) { + VIXL_ASSERT(xt.Is64Bits()); + VIXL_ASSERT(literal->GetSize() == 
kWRegSizeInBytes); + ldrsw(xt, static_cast(LinkAndGetWordOffsetTo(literal))); +} + + +void Assembler::ldr(const CPURegister& rt, RawLiteral* literal) { + VIXL_ASSERT(CPUHas(rt)); + VIXL_ASSERT(literal->GetSize() == static_cast(rt.GetSizeInBytes())); + ldr(rt, static_cast(LinkAndGetWordOffsetTo(literal))); +} + + +void Assembler::ldrsw(const Register& rt, int64_t imm19) { + Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt)); +} + + +void Assembler::ldr(const CPURegister& rt, int64_t imm19) { + VIXL_ASSERT(CPUHas(rt)); + LoadLiteralOp op = LoadLiteralOpFor(rt); + Emit(op | ImmLLiteral(imm19) | Rt(rt)); +} + + +void Assembler::prfm(PrefetchOperation op, int64_t imm19) { + Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19)); +} + + +// Exclusive-access instructions. +void Assembler::stxrb(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stxrh(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stxr(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? 
STXR_x : STXR_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldxrb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldxrh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldxr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits()); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldxp(const Register& rt, + const Register& rt2, + const MemOperand& src) { + VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits()); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? 
LDXP_x : LDXP_w; + Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stlxrb(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stlxrh(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stlxr(const Register& rs, + const Register& rt, + const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldaxrb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldaxrh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldaxr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits()); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? 
STLXP_x : STLXP_w; + Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldaxp(const Register& rt, + const Register& rt2, + const MemOperand& src) { + VIXL_ASSERT(rt.GetSizeInBits() == rt2.GetSizeInBits()); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w; + Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stlrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + +void Assembler::stlurb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpcImm)); + VIXL_ASSERT(dst.IsImmediateOffset() && IsImmLSUnscaled(dst.GetOffset())); + + Instr base = RnSP(dst.GetBaseRegister()); + int64_t offset = dst.GetOffset(); + Emit(STLURB | Rt(rt) | base | ImmLS(static_cast(offset))); +} + + +void Assembler::stlrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + +void Assembler::stlurh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpcImm)); + VIXL_ASSERT(dst.IsImmediateOffset() && IsImmLSUnscaled(dst.GetOffset())); + + Instr base = RnSP(dst.GetBaseRegister()); + int64_t offset = dst.GetOffset(); + Emit(STLURH | Rt(rt) | base | ImmLS(static_cast(offset))); +} + + +void Assembler::stlr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? 
STLR_x : STLR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + +void Assembler::stlur(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpcImm)); + VIXL_ASSERT(dst.IsImmediateOffset() && IsImmLSUnscaled(dst.GetOffset())); + + Instr base = RnSP(dst.GetBaseRegister()); + int64_t offset = dst.GetOffset(); + Instr op = rt.Is64Bits() ? STLUR_x : STLUR_w; + Emit(op | Rt(rt) | base | ImmLS(static_cast(offset))); +} + + +void Assembler::ldarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldar(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::stllrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLLRB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stllrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + Emit(STLLRH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::stllr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(dst.IsImmediateOffset() && (dst.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? 
STLLR_x : STLLR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.GetBaseRegister())); +} + + +void Assembler::ldlarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDLARB | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldlarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + Emit(LDLARH | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +void Assembler::ldlar(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kLORegions)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + LoadStoreExclusive op = rt.Is64Bits() ? LDLAR_x : LDLAR_w; + Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); +} + + +// clang-format off +#define COMPARE_AND_SWAP_W_X_LIST(V) \ + V(cas, CAS) \ + V(casa, CASA) \ + V(casl, CASL) \ + V(casal, CASAL) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const Register& rs, \ + const Register& rt, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + LoadStoreExclusive op = rt.Is64Bits() ? 
OP##_x : OP##_w; \ + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \ + } +COMPARE_AND_SWAP_W_X_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// clang-format off +#define COMPARE_AND_SWAP_W_LIST(V) \ + V(casb, CASB) \ + V(casab, CASAB) \ + V(caslb, CASLB) \ + V(casalb, CASALB) \ + V(cash, CASH) \ + V(casah, CASAH) \ + V(caslh, CASLH) \ + V(casalh, CASALH) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const Register& rs, \ + const Register& rt, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + Emit(OP | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \ + } +COMPARE_AND_SWAP_W_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define COMPARE_AND_SWAP_PAIR_LIST(V) \ + V(casp, CASP) \ + V(caspa, CASPA) \ + V(caspl, CASPL) \ + V(caspal, CASPAL) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const Register& rs, \ + const Register& rs1, \ + const Register& rt, \ + const Register& rt1, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + USE(rs1, rt1); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + VIXL_ASSERT(AreEven(rs, rt)); \ + VIXL_ASSERT(AreConsecutive(rs, rs1)); \ + VIXL_ASSERT(AreConsecutive(rt, rt1)); \ + LoadStoreExclusive op = rt.Is64Bits() ? OP##_x : OP##_w; \ + Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(src.GetBaseRegister())); \ + } +COMPARE_AND_SWAP_PAIR_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// These macros generate all the variations of the atomic memory operations, +// e.g. ldadd, ldadda, ldaddb, staddl, etc. +// For a full list of the methods with comments, see the assembler header file. 
+ +// clang-format off +#define ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(V, DEF) \ + V(DEF, add, LDADD) \ + V(DEF, clr, LDCLR) \ + V(DEF, eor, LDEOR) \ + V(DEF, set, LDSET) \ + V(DEF, smax, LDSMAX) \ + V(DEF, smin, LDSMIN) \ + V(DEF, umax, LDUMAX) \ + V(DEF, umin, LDUMIN) + +#define ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \ + V(NAME, OP##_x, OP##_w) \ + V(NAME##l, OP##L_x, OP##L_w) \ + V(NAME##b, OP##B, OP##B) \ + V(NAME##lb, OP##LB, OP##LB) \ + V(NAME##h, OP##H, OP##H) \ + V(NAME##lh, OP##LH, OP##LH) + +#define ATOMIC_MEMORY_LOAD_MODES(V, NAME, OP) \ + ATOMIC_MEMORY_STORE_MODES(V, NAME, OP) \ + V(NAME##a, OP##A_x, OP##A_w) \ + V(NAME##al, OP##AL_x, OP##AL_w) \ + V(NAME##ab, OP##AB, OP##AB) \ + V(NAME##alb, OP##ALB, OP##ALB) \ + V(NAME##ah, OP##AH, OP##AH) \ + V(NAME##alh, OP##ALH, OP##ALH) +// clang-format on + +#define DEFINE_ASM_LOAD_FUNC(FN, OP_X, OP_W) \ + void Assembler::ld##FN(const Register& rs, \ + const Register& rt, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + AtomicMemoryOp op = rt.Is64Bits() ? OP_X : OP_W; \ + Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister())); \ + } +#define DEFINE_ASM_STORE_FUNC(FN, OP_X, OP_W) \ + void Assembler::st##FN(const Register& rs, const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + ld##FN(rs, AppropriateZeroRegFor(rs), src); \ + } + +ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_LOAD_MODES, + DEFINE_ASM_LOAD_FUNC) +ATOMIC_MEMORY_SIMPLE_OPERATION_LIST(ATOMIC_MEMORY_STORE_MODES, + DEFINE_ASM_STORE_FUNC) + +#define DEFINE_ASM_SWP_FUNC(FN, OP_X, OP_W) \ + void Assembler::FN(const Register& rs, \ + const Register& rt, \ + const MemOperand& src) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kAtomics)); \ + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); \ + AtomicMemoryOp op = rt.Is64Bits() ? 
OP_X : OP_W; \ + Emit(op | Rs(rs) | Rt(rt) | RnSP(src.GetBaseRegister())); \ + } + +ATOMIC_MEMORY_LOAD_MODES(DEFINE_ASM_SWP_FUNC, swp, SWP) + +#undef DEFINE_ASM_LOAD_FUNC +#undef DEFINE_ASM_STORE_FUNC +#undef DEFINE_ASM_SWP_FUNC + + +void Assembler::ldaprb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + AtomicMemoryOp op = LDAPRB; + Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister())); +} + +void Assembler::ldapurb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm)); + VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset())); + + Instr base = RnSP(src.GetBaseRegister()); + int64_t offset = src.GetOffset(); + Emit(LDAPURB | Rt(rt) | base | ImmLS(static_cast(offset))); +} + +void Assembler::ldapursb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm)); + VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset())); + + Instr base = RnSP(src.GetBaseRegister()); + int64_t offset = src.GetOffset(); + Instr op = rt.Is64Bits() ? 
LDAPURSB_x : LDAPURSB_w; + Emit(op | Rt(rt) | base | ImmLS(static_cast(offset))); +} + +void Assembler::ldaprh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + AtomicMemoryOp op = LDAPRH; + Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister())); +} + +void Assembler::ldapurh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm)); + VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset())); + + Instr base = RnSP(src.GetBaseRegister()); + int64_t offset = src.GetOffset(); + Emit(LDAPURH | Rt(rt) | base | ImmLS(static_cast(offset))); +} + +void Assembler::ldapursh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm)); + VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset())); + + Instr base = RnSP(src.GetBaseRegister()); + int64_t offset = src.GetOffset(); + LoadStoreRCpcUnscaledOffsetOp op = rt.Is64Bits() ? LDAPURSH_x : LDAPURSH_w; + Emit(op | Rt(rt) | base | ImmLS(static_cast(offset))); +} + +void Assembler::ldapr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc)); + VIXL_ASSERT(src.IsImmediateOffset() && (src.GetOffset() == 0)); + AtomicMemoryOp op = rt.Is64Bits() ? LDAPR_x : LDAPR_w; + Emit(op | Rs(xzr) | Rt(rt) | RnSP(src.GetBaseRegister())); +} + +void Assembler::ldapur(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm)); + VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset())); + + Instr base = RnSP(src.GetBaseRegister()); + int64_t offset = src.GetOffset(); + LoadStoreRCpcUnscaledOffsetOp op = rt.Is64Bits() ? 
LDAPUR_x : LDAPUR_w; + Emit(op | Rt(rt) | base | ImmLS(static_cast(offset))); +} + +void Assembler::ldapursw(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm)); + VIXL_ASSERT(rt.Is64Bits()); + VIXL_ASSERT(src.IsImmediateOffset() && IsImmLSUnscaled(src.GetOffset())); + + Instr base = RnSP(src.GetBaseRegister()); + int64_t offset = src.GetOffset(); + Emit(LDAPURSW | Rt(rt) | base | ImmLS(static_cast(offset))); +} + +void Assembler::prfm(PrefetchOperation op, + const MemOperand& address, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireUnscaledOffset); + VIXL_ASSERT(option != PreferUnscaledOffset); + Prefetch(op, address, option); +} + + +void Assembler::prfum(PrefetchOperation op, + const MemOperand& address, + LoadStoreScalingOption option) { + VIXL_ASSERT(option != RequireScaledOffset); + VIXL_ASSERT(option != PreferScaledOffset); + Prefetch(op, address, option); +} + + +void Assembler::prfm(PrefetchOperation op, RawLiteral* literal) { + prfm(op, static_cast(LinkAndGetWordOffsetTo(literal))); +} + + +void Assembler::sys(int op1, int crn, int crm, int op2, const Register& xt) { + VIXL_ASSERT(xt.Is64Bits()); + Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(xt)); +} + + +void Assembler::sys(int op, const Register& xt) { + VIXL_ASSERT(xt.Is64Bits()); + Emit(SYS | SysOp(op) | Rt(xt)); +} + + +void Assembler::dc(DataCacheOp op, const Register& rt) { + if (op == CVAP) VIXL_ASSERT(CPUHas(CPUFeatures::kDCPoP)); + sys(op, rt); +} + + +void Assembler::ic(InstructionCacheOp op, const Register& rt) { + VIXL_ASSERT(op == IVAU); + sys(op, rt); +} + + +void Assembler::hint(SystemHint code) { hint(static_cast(code)); } + + +void Assembler::hint(int imm7) { + VIXL_ASSERT(IsUint7(imm7)); + Emit(HINT | ImmHint(imm7) | Rt(xzr)); +} + + +// NEON structure loads and stores. 
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) { + Instr addr_field = RnSP(addr.GetBaseRegister()); + + if (addr.IsPostIndex()) { + VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex == + static_cast( + NEONLoadStoreSingleStructPostIndex)); + + addr_field |= NEONLoadStoreMultiStructPostIndex; + if (addr.GetOffset() == 0) { + addr_field |= RmNot31(addr.GetRegisterOffset()); + } else { + // The immediate post index addressing mode is indicated by rm = 31. + // The immediate is implied by the number of vector registers used. + addr_field |= (0x1f << Rm_offset); + } + } else { + VIXL_ASSERT(addr.IsImmediateOffset() && (addr.GetOffset() == 0)); + } + return addr_field; +} + +void Assembler::LoadStoreStructVerify(const VRegister& vt, + const MemOperand& addr, + Instr op) { +#ifdef VIXL_DEBUG + // Assert that addressing mode is either offset (with immediate 0), post + // index by immediate of the size of the register list, or post index by a + // value in a core register. 
+ if (addr.IsImmediateOffset()) { + VIXL_ASSERT(addr.GetOffset() == 0); + } else { + int offset = vt.GetSizeInBytes(); + switch (op) { + case NEON_LD1_1v: + case NEON_ST1_1v: + offset *= 1; + break; + case NEONLoadStoreSingleStructLoad1: + case NEONLoadStoreSingleStructStore1: + case NEON_LD1R: + offset = (offset / vt.GetLanes()) * 1; + break; + + case NEON_LD1_2v: + case NEON_ST1_2v: + case NEON_LD2: + case NEON_ST2: + offset *= 2; + break; + case NEONLoadStoreSingleStructLoad2: + case NEONLoadStoreSingleStructStore2: + case NEON_LD2R: + offset = (offset / vt.GetLanes()) * 2; + break; + + case NEON_LD1_3v: + case NEON_ST1_3v: + case NEON_LD3: + case NEON_ST3: + offset *= 3; + break; + case NEONLoadStoreSingleStructLoad3: + case NEONLoadStoreSingleStructStore3: + case NEON_LD3R: + offset = (offset / vt.GetLanes()) * 3; + break; + + case NEON_LD1_4v: + case NEON_ST1_4v: + case NEON_LD4: + case NEON_ST4: + offset *= 4; + break; + case NEONLoadStoreSingleStructLoad4: + case NEONLoadStoreSingleStructStore4: + case NEON_LD4R: + offset = (offset / vt.GetLanes()) * 4; + break; + default: + VIXL_UNREACHABLE(); + } + VIXL_ASSERT(!addr.GetRegisterOffset().Is(NoReg) || + addr.GetOffset() == offset); + } +#else + USE(vt, addr, op); +#endif +} + +void Assembler::LoadStoreStruct(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreMultiStructOp op) { + LoadStoreStructVerify(vt, addr, op); + VIXL_ASSERT(vt.IsVector() || vt.Is1D()); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} + + +void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); +} + + +void Assembler::ld1(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStruct(vt, src, NEON_LD1_1v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& 
vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD1_2v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD1_3v); +} + + +void Assembler::ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD1_4v); +} + + +void Assembler::ld2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_LD2); +} + + +void Assembler::ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2); +} + + +void Assembler::ld2r(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R); +} + + +void Assembler::ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + 
VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_LD3); +} + + +void Assembler::ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3); +} + + +void Assembler::ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R); +} + + +void Assembler::ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_LD4); +} + + +void Assembler::ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4); +} + + +void Assembler::ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R); +} + + +void Assembler::st1(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + 
LoadStoreStruct(vt, src, NEON_ST1_1v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const MemOperand& src) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, src, NEON_ST1_2v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, src, NEON_ST1_3v); +} + + +void Assembler::st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, src, NEON_ST1_4v); +} + + +void Assembler::st2(const VRegister& vt, + const VRegister& vt2, + const MemOperand& dst) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStruct(vt, dst, NEON_ST2); +} + + +void Assembler::st2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& dst) { + USE(vt2); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2)); + VIXL_ASSERT(AreConsecutive(vt, vt2)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2); +} + + +void Assembler::st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStruct(vt, dst, NEON_ST3); +} + + +void Assembler::st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const 
MemOperand& dst) { + USE(vt2, vt3); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3); +} + + +void Assembler::st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStruct(vt, dst, NEON_ST4); +} + + +void Assembler::st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& dst) { + USE(vt2, vt3, vt4); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4)); + VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4); +} + + +void Assembler::LoadStoreStructSingle(const VRegister& vt, + uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op) { + LoadStoreStructVerify(vt, addr, op); + + // We support vt arguments of the form vt.VxT() or vt.T(), where x is the + // number of lanes, and T is b, h, s or d. + unsigned lane_size = vt.GetLaneSizeInBytes(); + VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size)); + + // Lane size is encoded in the opcode field. Lane index is encoded in the Q, + // S and size fields. 
+ lane *= lane_size; + if (lane_size == 8) lane++; + + Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask; + Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask; + Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask; + + Instr instr = op; + switch (lane_size) { + case 1: + instr |= NEONLoadStoreSingle_b; + break; + case 2: + instr |= NEONLoadStoreSingle_h; + break; + case 4: + instr |= NEONLoadStoreSingle_s; + break; + default: + VIXL_ASSERT(lane_size == 8); + instr |= NEONLoadStoreSingle_d; + } + + Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt)); +} + + +void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1); +} + + +void Assembler::ld1r(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R); +} + + +void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1); +} + + +void Assembler::NEON3DifferentL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vn, vm)); + VIXL_ASSERT((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) || + (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3DifferentW(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) 
|| + (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) || + (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D())); + Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3DifferentHN(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop) { + VIXL_ASSERT(AreSameFormat(vm, vn)); + VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_3DIFF_LONG_LIST(V) \ + V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \ + V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \ + V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \ + V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \ + V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \ + V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \ + V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \ + V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \ + V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \ + V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \ + V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \ + V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \ + V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \ + V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \ + V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \ + V(ssubl2, NEON_SSUBL2, 
vn.IsVector() && vn.IsQ()) \ + V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \ + V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \ + V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \ + V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ + V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ + V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ +// clang-format on + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ +void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEON3DifferentL(vd, vn, vm, OP); \ +} +NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// clang-format off +#define NEON_3DIFF_HN_LIST(V) \ + V(addhn, NEON_ADDHN, vd.IsD()) \ + V(addhn2, NEON_ADDHN2, vd.IsQ()) \ + V(raddhn, NEON_RADDHN, vd.IsD()) \ + V(raddhn2, NEON_RADDHN2, vd.IsQ()) \ + V(subhn, NEON_SUBHN, vd.IsD()) \ + V(subhn2, NEON_SUBHN2, vd.IsQ()) \ + V(rsubhn, NEON_RSUBHN, vd.IsD()) \ + V(rsubhn2, NEON_RSUBHN2, vd.IsQ()) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEON3DifferentHN(vd, vn, vm, OP); \ + } +NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::uaddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW); +} + + +void Assembler::uaddw2(const VRegister& vd, + const VRegister& 
vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_UADDW2); +} + + +void Assembler::saddw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW); +} + + +void Assembler::saddw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SADDW2); +} + + +void Assembler::usubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW); +} + + +void Assembler::usubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_USUBW2); +} + + +void Assembler::ssubw(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsD()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW); +} + + +void Assembler::ssubw2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vm.IsQ()); + NEON3DifferentW(vd, vn, vm, NEON_SSUBW2); +} + + +void Assembler::mov(const Register& rd, const Register& rm) { + // Moves involving the stack pointer are encoded as add immediate with + // second operand of zero. Otherwise, orr with first operand zr is + // used. 
+ if (rd.IsSP() || rm.IsSP()) { + add(rd, rm, 0); + } else { + orr(rd, AppropriateZeroRegFor(rd), rm); + } +} + +void Assembler::xpaclri() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(XPACLRI); +} + +void Assembler::pacia1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIA1716); +} + +void Assembler::pacib1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIB1716); +} + +void Assembler::autia1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIA1716); +} + +void Assembler::autib1716() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIB1716); +} + +void Assembler::paciaz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIAZ); +} + +void Assembler::pacibz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIBZ); +} + +void Assembler::autiaz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIAZ); +} + +void Assembler::autibz() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIBZ); +} + +void Assembler::paciasp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIASP); +} + +void Assembler::pacibsp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(PACIBSP); +} + +void Assembler::autiasp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIASP); +} + +void Assembler::autibsp() { + VIXL_ASSERT(CPUHas(CPUFeatures::kPAuth)); + Emit(AUTIBSP); +} + +void Assembler::bti(BranchTargetIdentifier id) { + VIXL_ASSERT((id != EmitPACIASP) && (id != EmitPACIBSP)); // Not modes of Bti. + VIXL_ASSERT(id != EmitBTI_none); // Always generate an instruction. 
+ VIXL_ASSERT(CPUHas(CPUFeatures::kBTI)); + hint(static_cast(id)); +} + +void Assembler::mvn(const Register& rd, const Operand& operand) { + orn(rd, AppropriateZeroRegFor(rd), operand); +} + + +void Assembler::mrs(const Register& xt, SystemRegister sysreg) { + VIXL_ASSERT(xt.Is64Bits()); + Emit(MRS | ImmSystemRegister(sysreg) | Rt(xt)); +} + + +void Assembler::msr(SystemRegister sysreg, const Register& xt) { + VIXL_ASSERT(xt.Is64Bits()); + Emit(MSR | Rt(xt) | ImmSystemRegister(sysreg)); +} + + +void Assembler::cfinv() { + VIXL_ASSERT(CPUHas(CPUFeatures::kFlagM)); + Emit(CFINV); +} + + +void Assembler::axflag() { + VIXL_ASSERT(CPUHas(CPUFeatures::kAXFlag)); + Emit(AXFLAG); +} + + +void Assembler::xaflag() { + VIXL_ASSERT(CPUHas(CPUFeatures::kAXFlag)); + Emit(XAFLAG); +} + + +void Assembler::clrex(int imm4) { Emit(CLREX | CRm(imm4)); } + + +void Assembler::dmb(BarrierDomain domain, BarrierType type) { + Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + + +void Assembler::dsb(BarrierDomain domain, BarrierType type) { + Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); +} + + +void Assembler::isb() { + Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); +} + +void Assembler::esb() { + VIXL_ASSERT(CPUHas(CPUFeatures::kRAS)); + hint(ESB); +} + +void Assembler::csdb() { hint(CSDB); } + +void Assembler::fmov(const VRegister& vd, double imm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1D()); + Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm)); + } else { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.Is2D()); + Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit; + Instr q = NEON_Q; + uint32_t encoded_imm = FP64ToImm8(imm); + Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const VRegister& vd, float imm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S()); + Emit(FMOV_s_imm | 
Rd(vd) | ImmFP32(imm)); + } else { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.Is2S() | vd.Is4S()); + Instr op = NEONModifiedImmediate_MOVI; + Instr q = vd.Is4S() ? NEON_Q : 0; + uint32_t encoded_imm = FP32ToImm8(imm); + Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const VRegister& vd, Float16 imm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.IsScalar()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H()); + Emit(FMOV_h_imm | Rd(vd) | ImmFP16(imm)); + } else { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); + VIXL_ASSERT(vd.Is4H() | vd.Is8H()); + Instr q = vd.Is8H() ? NEON_Q : 0; + uint32_t encoded_imm = FP16ToImm8(imm); + Emit(q | NEONModifiedImmediate_FMOV | ImmNEONabcdefgh(encoded_imm) | + NEONCmode(0xf) | Rd(vd)); + } +} + + +void Assembler::fmov(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((rd.GetSizeInBits() == vn.GetSizeInBits()) || vn.Is1H()); + FPIntegerConvertOp op; + switch (vn.GetSizeInBits()) { + case 16: + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = rd.Is64Bits() ? FMOV_xh : FMOV_wh; + break; + case 32: + op = FMOV_ws; + break; + default: + op = FMOV_xd; + } + Emit(op | Rd(rd) | Rn(vn)); +} + + +void Assembler::fmov(const VRegister& vd, const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT((vd.GetSizeInBits() == rn.GetSizeInBits()) || vd.Is1H()); + FPIntegerConvertOp op; + switch (vd.GetSizeInBits()) { + case 16: + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = rn.Is64Bits() ? 
FMOV_hx : FMOV_hw; + break; + case 32: + op = FMOV_sw; + break; + default: + op = FMOV_dx; + } + Emit(op | Rd(vd) | Rn(rn)); +} + + +void Assembler::fmov(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + } + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(vd.IsSameFormat(vn)); + Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn)); +} + + +void Assembler::fmov(const VRegister& vd, int index, const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP)); + VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX()); + USE(index); + Emit(FMOV_d1_x | Rd(vd) | Rn(rn)); +} + + +void Assembler::fmov(const Register& rd, const VRegister& vn, int index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kFP)); + VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX()); + USE(index); + Emit(FMOV_x_d1 | Rd(rd) | Rn(vn)); +} + + +void Assembler::fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FMADD_h; + } else if (vd.Is1S()) { + op = FMADD_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FMADD_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FMSUB_h; + } else if (vd.Is1S()) { + op = FMSUB_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FMSUB_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + 
VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FNMADD_h; + } else if (vd.Is1S()) { + op = FNMADD_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FNMADD_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing3SourceOp op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FNMSUB_h; + } else if (vd.Is1S()) { + op = FNMSUB_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FNMSUB_d; + } + FPDataProcessing3Source(vd, vn, vm, va, op); +} + + +void Assembler::fnmul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm)); + Instr op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + op = FNMUL_h; + } else if (vd.Is1S()) { + op = FNMUL_s; + } else { + VIXL_ASSERT(vd.Is1D()); + op = FNMUL_d; + } + Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::FPCompareMacro(const VRegister& vn, + double value, + FPTrapFlags trap) { + USE(value); + // Although the fcmp{e} instructions can strictly only take an immediate + // value of +0.0, we don't need to check for -0.0 because the sign of 0.0 + // doesn't affect the result of the comparison. + VIXL_ASSERT(value == 0.0); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero; + Emit(FPType(vn) | op | Rn(vn)); +} + + +void Assembler::FPCompareMacro(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap) { + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT(vn.IsSameSizeAndType(vm)); + Instr op = (trap == EnableTrap) ? 
FCMPE : FCMP; + Emit(FPType(vn) | op | Rm(vm) | Rn(vn)); +} + + +void Assembler::fcmp(const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, vm, DisableTrap); +} + + +void Assembler::fcmpe(const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, vm, EnableTrap); +} + + +void Assembler::fcmp(const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, value, DisableTrap); +} + + +void Assembler::fcmpe(const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCompareMacro(vn, value, EnableTrap); +} + + +void Assembler::FPCCompareMacro(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap) { + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT(vn.IsSameSizeAndType(vm)); + Instr op = (trap == EnableTrap) ? 
FCCMPE : FCCMP; + Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv)); +} + +void Assembler::fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap); +} + + +void Assembler::fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap); +} + + +void Assembler::fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvt(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + FPDataProcessing1SourceOp op; + // The half-precision variants belong to base FP, and do not require kFPHalf. + if (vd.Is1D()) { + VIXL_ASSERT(vn.Is1S() || vn.Is1H()); + op = vn.Is1S() ? FCVT_ds : FCVT_dh; + } else if (vd.Is1S()) { + VIXL_ASSERT(vn.Is1D() || vn.Is1H()); + op = vn.Is1D() ? FCVT_sd : FCVT_sh; + } else { + VIXL_ASSERT(vd.Is1H()); + VIXL_ASSERT(vn.Is1D() || vn.Is1S()); + op = vn.Is1D() ? FCVT_hd : FCVT_hs; + } + FPDataProcessing1Source(vd, vn, op); +} + + +void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vd.Is2D() ? 
(1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0; + Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S())); + // The half-precision variants belong to base FP, and do not require kFPHalf. + Instr format = vn.Is2D() ? 
(1 << NEONSize_offset) : 0; + Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + Instr format = 1 << NEONSize_offset; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S() && vn.Is1D()); + Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd)); + } else { + VIXL_ASSERT(vd.Is2S() && vn.Is2D()); + Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT(vd.Is4S() && vn.Is2D()); + Instr format = 1 << NEONSize_offset; + Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd)); +} + +void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kJSCVT)); + VIXL_ASSERT(rd.IsW() && vn.Is1D()); + Emit(FJCVTZS | Rn(vn) | Rd(rd)); +} + + +void Assembler::NEONFPConvertToInt(const Register& rd, + const VRegister& vn, + Instr op) { + Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd)); +} + + +void Assembler::NEONFPConvertToInt(const VRegister& vd, + const VRegister& vn, + Instr op) { + if (vn.IsScalar()) { + VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D())); + op |= NEON_Q | NEONScalar; + } + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP16ConvertToInt(const VRegister& vd, + const VRegister& vn, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vn.IsLaneSizeH()); + if (vn.IsScalar()) { + op |= NEON_Q | NEONScalar; + } else if (vn.Is8H()) { + op |= NEON_Q; + } + Emit(op | Rn(vn) | Rd(vd)); +} + + +#define NEON_FP2REGMISC_FCVT_LIST(V) \ + V(fcvtnu, NEON_FCVTNU, FCVTNU) \ + V(fcvtns, NEON_FCVTNS, FCVTNS) \ + V(fcvtpu, NEON_FCVTPU, FCVTPU) \ + V(fcvtps, NEON_FCVTPS, FCVTPS) \ + V(fcvtmu, NEON_FCVTMU, FCVTMU) \ + V(fcvtms, NEON_FCVTMS, FCVTMS) \ + V(fcvtau, NEON_FCVTAU, FCVTAU) \ + V(fcvtas, NEON_FCVTAS, 
FCVTAS) + +#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \ + void Assembler::FN(const Register& rd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); \ + if (vn.IsH()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); \ + NEONFPConvertToInt(rd, vn, SCA_OP); \ + } \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); \ + if (vd.IsLaneSizeH()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + NEONFP16ConvertToInt(vd, vn, VEC_OP##_H); \ + } else { \ + NEONFPConvertToInt(vd, vn, VEC_OP); \ + } \ + } +NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS) +#undef DEFINE_ASM_FUNCS + + +void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + + +void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. 
+ VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZS_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCVTZS); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm); + } +} + + +void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vn.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vn.Is1H() || vn.Is1S() || vn.Is1D()); + VIXL_ASSERT((fbits >= 0) && (fbits <= rd.GetSizeInBits())); + if (fbits == 0) { + Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd)); + } else { + Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) | + Rd(rd)); + } +} + + +void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_FCVTZU_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCVTZU); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm); + } +} + +void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. 
+ VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_UCVTF_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_UCVTF); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm); + } +} + +void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) { + // This form is a NEON scalar FP instruction. + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vn.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + if (vd.IsLaneSizeH()) { + NEONFP2RegMiscFP16(vd, vn, NEON_SCVTF_H); + } else { + NEONFP2RegMisc(vd, vn, NEON_SCVTF); + } + } else { + VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S() || + vd.Is1H() || vd.Is4H() || vd.Is8H()); + NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm); + } +} + + +void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd)); + } else { + Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(vd)); + } +} + + +void Assembler::ucvtf(const VRegister& vd, const Register& rn, int fbits) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(fbits >= 0); + if (fbits == 0) { + Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd)); + } else { + Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | + Rd(vd)); + } +} + + 
+void Assembler::NEON3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3SameOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() || !vd.IsQ()); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON3SameFP16(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.GetLaneSizeInBytes() == kHRegSizeInBytes); + if (vd.Is8H()) op |= NEON_Q; + Emit(op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_FP2REGMISC_LIST(V) \ + V(fabs, NEON_FABS, FABS, FABS_h) \ + V(fneg, NEON_FNEG, FNEG, FNEG_h) \ + V(fsqrt, NEON_FSQRT, FSQRT, FSQRT_h) \ + V(frintn, NEON_FRINTN, FRINTN, FRINTN_h) \ + V(frinta, NEON_FRINTA, FRINTA, FRINTA_h) \ + V(frintp, NEON_FRINTP, FRINTP, FRINTP_h) \ + V(frintm, NEON_FRINTM, FRINTM, FRINTM_h) \ + V(frintx, NEON_FRINTX, FRINTX, FRINTX_h) \ + V(frintz, NEON_FRINTZ, FRINTZ, FRINTZ_h) \ + V(frinti, NEON_FRINTI, FRINTI, FRINTI_h) \ + V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar, NEON_FRSQRTE_H_scalar) \ + V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar, NEON_FRECPE_H_scalar) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); \ + Instr op; \ + if (vd.IsScalar()) { \ + if (vd.Is1H()) { \ + if ((SCA_OP_H & NEONScalar2RegMiscFP16FMask) == \ + NEONScalar2RegMiscFP16Fixed) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); \ + } else { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); \ + } 
\ + op = SCA_OP_H; \ + } else { \ + if ((SCA_OP & NEONScalar2RegMiscFMask) == NEONScalar2RegMiscFixed) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + } \ + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \ + op = SCA_OP; \ + } \ + } else { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(vd.Is4H() || vd.Is8H() || vd.Is2S() || vd.Is2D() || \ + vd.Is4S()); \ + if (vd.IsLaneSizeH()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + op = VEC_OP##_H; \ + if (vd.Is8H()) { \ + op |= NEON_Q; \ + } \ + } else { \ + op = VEC_OP; \ + } \ + } \ + if (vd.IsLaneSizeH()) { \ + NEONFP2RegMiscFP16(vd, vn, op); \ + } else { \ + NEONFP2RegMisc(vd, vn, op); \ + } \ + } +NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::NEONFP2RegMiscFP16(const VRegister& vd, + const VRegister& vn, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Emit(op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + Instr op) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEON2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + int value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0); + USE(value); + + Instr format, op = vop; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value); +} + + +void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGE_zero, value); +} + + +void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) { + 
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMGT_zero, value); +} + + +void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_CMLE_zero, value); +} + + +void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_CMLT_zero, value); +} + + +void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) { + USE(shift); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) || + (vd.Is4S() && vn.Is4H() && shift == 16) || + (vd.Is2D() && vn.Is2S() && shift == 32)); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + + +void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) { + USE(shift); + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) || + (vd.Is4S() && vn.Is8H() && shift == 16) || + (vd.Is2D() && vn.Is4S() && shift == 32)); + Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + double value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0.0); + USE(value); + + Instr op = vop; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + op |= NEON_Q | NEONScalar; + } else { + VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); + } + + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONFP2RegMiscFP16(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscFP16Op vop, + double value) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(value == 0.0); + USE(value); + + Instr op = vop; + if (vd.IsScalar()) { + VIXL_ASSERT(vd.Is1H()); + op |= NEON_Q | NEONScalar; + } else { + 
VIXL_ASSERT(vd.Is4H() || vd.Is8H()); + if (vd.Is8H()) { + op |= NEON_Q; + } + } + + Emit(op | Rn(vn) | Rd(vd)); +} + + +void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMEQ_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value); + } +} + + +void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMGE_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value); + } +} + + +void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMGT_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value); + } +} + + +void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMLE_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value); + } +} + + +void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + if (vd.IsLaneSizeH()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + NEONFP2RegMiscFP16(vd, vn, NEON_FCMLT_H_zero, value); + } else { + NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value); + } +} + + +void Assembler::frecpx(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsScalar()); + 
VIXL_ASSERT(AreSameFormat(vd, vn)); + Instr op; + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + op = NEON_FRECPX_H_scalar; + } else { + VIXL_ASSERT(vd.Is1S() || vd.Is1D()); + op = NEON_FRECPX_scalar; + } + Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_3SAME_LIST(V) \ + V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \ + V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \ + V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \ + V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \ + V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \ + V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \ + V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \ + V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \ + V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \ + V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \ + V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \ + V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \ + V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \ + V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ + V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(saba, NEON_SABA, 
vd.IsVector() && !vd.IsLaneSizeD()) \ + V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \ + V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \ + V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \ + V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \ + V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \ + V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \ + V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \ + V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \ + V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \ + V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \ + V(uqadd, NEON_UQADD, true) \ + V(sqadd, NEON_SQADD, true) \ + V(uqsub, NEON_UQSUB, true) \ + V(sqsub, NEON_SQSUB, true) \ + V(sqshl, NEON_SQSHL, true) \ + V(uqshl, NEON_UQSHL, true) \ + V(sqrshl, NEON_SQRSHL, true) \ + V(uqrshl, NEON_UQRSHL, true) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEON3Same(vd, vn, vm, OP); \ + } +NEON_3SAME_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +// clang-format off +#define NEON_FP3SAME_OP_LIST(V) \ + V(fmulx, NEON_FMULX, NEON_FMULX_scalar, NEON_FMULX_H_scalar) \ + V(frecps, NEON_FRECPS, NEON_FRECPS_scalar, NEON_FRECPS_H_scalar) \ + V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar, NEON_FRSQRTS_H_scalar) \ + V(fabd, NEON_FABD, NEON_FABD_scalar, NEON_FABD_H_scalar) \ + V(fmla, NEON_FMLA, 0, 0) \ + V(fmls, NEON_FMLS, 0, 0) \ + V(facge, NEON_FACGE, NEON_FACGE_scalar, NEON_FACGE_H_scalar) \ + V(facgt, NEON_FACGT, NEON_FACGT_scalar, NEON_FACGT_H_scalar) \ + V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar, NEON_FCMEQ_H_scalar) \ + V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar, 
NEON_FCMGE_H_scalar) \ + V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar, NEON_FCMGT_H_scalar) \ + V(faddp, NEON_FADDP, 0, 0) \ + V(fmaxp, NEON_FMAXP, 0, 0) \ + V(fminp, NEON_FMINP, 0, 0) \ + V(fmaxnmp, NEON_FMAXNMP, 0, 0) \ + V(fadd, NEON_FADD, FADD, 0) \ + V(fsub, NEON_FSUB, FSUB, 0) \ + V(fmul, NEON_FMUL, FMUL, 0) \ + V(fdiv, NEON_FDIV, FDIV, 0) \ + V(fmax, NEON_FMAX, FMAX, 0) \ + V(fmin, NEON_FMIN, FMIN, 0) \ + V(fmaxnm, NEON_FMAXNM, FMAXNM, 0) \ + V(fminnm, NEON_FMINNM, FMINNM, 0) \ + V(fminnmp, NEON_FMINNMP, 0, 0) +// clang-format on + +// TODO: This macro is complicated because it classifies the instructions in the +// macro list above, and treats each case differently. It could be somewhat +// simpler if we were to split the macro, at the cost of some duplication. +#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP, SCA_OP_H) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP)); \ + Instr op; \ + bool is_fp16 = false; \ + if ((SCA_OP != 0) && vd.IsScalar()) { \ + if ((SCA_OP_H != 0) && vd.Is1H()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf)); \ + is_fp16 = true; \ + op = SCA_OP_H; \ + } else { \ + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); \ + if ((SCA_OP & NEONScalar3SameFMask) == NEONScalar3SameFixed) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + } else if (vd.Is1H()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFPHalf)); \ + } \ + op = SCA_OP; \ + } \ + } else { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(vd.IsVector()); \ + if (vd.Is4H() || vd.Is8H()) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + is_fp16 = true; \ + op = VEC_OP##_H; \ + } else { \ + VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \ + op = VEC_OP; \ + } \ + } \ + if (is_fp16) { \ + NEON3SameFP16(vd, vn, vm, op); \ + } else { \ + NEONFP3Same(vd, vn, vm, op); \ + } \ + } 
+NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_FHM_LIST(V) \ + V(fmlal, NEON_FMLAL) \ + V(fmlal2, NEON_FMLAL2) \ + V(fmlsl, NEON_FMLSL) \ + V(fmlsl2, NEON_FMLSL2) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, VEC_OP) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, \ + CPUFeatures::kFP, \ + CPUFeatures::kNEONHalf, \ + CPUFeatures::kFHM)); \ + VIXL_ASSERT((vd.Is2S() && vn.Is2H() && vm.Is2H()) || \ + (vd.Is4S() && vn.Is4H() && vm.Is4H())); \ + Emit(FPFormat(vd) | VEC_OP | Rm(vm) | Rn(vn) | Rd(vd)); \ + } +NEON_FHM_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::addp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1D() && vn.Is2D())); + Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd)); +} + + +void Assembler::sqrdmlah(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() || !vd.IsQ()); + + Instr format, op = NEON_SQRDMLAH; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::sqrdmlsh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() || !vd.IsQ()); + + Instr format, op = NEON_SQRDMLSH; + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + format = VFormat(vd); + } + + Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::sdot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + 
VIXL_ASSERT(AreSameFormat(vn, vm)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B())); + + Emit(VFormat(vd) | NEON_SDOT | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::udot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + VIXL_ASSERT(AreSameFormat(vn, vm)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B()) || (vd.Is4S() && vn.Is16B())); + + Emit(VFormat(vd) | NEON_UDOT | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::faddp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FADDP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMAXP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fminp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMINP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && 
vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMAXNMP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd)); + } +} + + +void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); + VIXL_ASSERT((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()) || + (vd.Is1H() && vn.Is2H())); + if (vd.Is1H()) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(NEON_FMINNMP_h_scalar | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd)); + } +} + + +// v8.3 complex numbers - floating-point complex multiply accumulate. +void Assembler::fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + int rot) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); + VIXL_ASSERT(vd.IsVector() && AreSameFormat(vd, vn)); + VIXL_ASSERT((vm.IsH() && (vd.Is8H() || vd.Is4H())) || + (vm.IsS() && vd.Is4S())); + if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + int index_num_bits = vd.Is4S() ? 1 : 2; + Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA_byelement | + ImmNEONHLM(vm_index, index_num_bits) | ImmRotFcmlaSca(rot) | Rn(vn) | + Rd(vd)); +} + + +void Assembler::fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB()); + if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(VFormat(vd) | Rm(vm) | NEON_FCMLA | ImmRotFcmlaVec(rot) | Rn(vn) | + Rd(vd)); +} + + +// v8.3 complex numbers - floating-point complex add. 
+void Assembler::fcadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot) { + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kFcma)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.IsVector() && !vd.IsLaneSizeB()); + if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); + Emit(VFormat(vd) | Rm(vm) | NEON_FCADD | ImmRotFcadd(rot) | Rn(vn) | Rd(vd)); +} + + +void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR); +} + + +void Assembler::mov(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + if (vd.IsD()) { + orr(vd.V8B(), vn.V8B(), vn.V8B()); + } else { + VIXL_ASSERT(vd.IsQ()); + orr(vd.V16B(), vn.V16B(), vn.V16B()); + } +} + + +void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC); +} + + +void Assembler::movi(const VRegister& vd, + const uint64_t imm, + Shift shift, + const int shift_amount) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((shift == LSL) || (shift == MSL)); + if (vd.Is2D() || vd.Is1D()) { + VIXL_ASSERT(shift_amount == 0); + int imm8 = 0; + for (int i = 0; i < 8; ++i) { + int byte = (imm >> (i * 8)) & 0xff; + VIXL_ASSERT((byte == 0) || (byte == 0xff)); + if (byte == 0xff) { + imm8 |= (1 << i); + } + } + int q = vd.Is2D() ? 
NEON_Q : 0; + Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI | + ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd)); + } else if (shift == LSL) { + VIXL_ASSERT(IsUint8(imm)); + NEONModifiedImmShiftLsl(vd, + static_cast(imm), + shift_amount, + NEONModifiedImmediate_MOVI); + } else { + VIXL_ASSERT(IsUint8(imm)); + NEONModifiedImmShiftMsl(vd, + static_cast(imm), + shift_amount, + NEONModifiedImmediate_MOVI); + } +} + + +void Assembler::mvn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + if (vd.IsD()) { + not_(vd.V8B(), vn.V8B()); + } else { + VIXL_ASSERT(vd.IsQ()); + not_(vd.V16B(), vn.V16B()); + } +} + + +void Assembler::mvni(const VRegister& vd, + const int imm8, + Shift shift, + const int shift_amount) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((shift == LSL) || (shift == MSL)); + if (shift == LSL) { + NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI); + } else { + NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI); + } +} + + +void Assembler::NEONFPByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop, + NEONByIndexedElementOp vop_half) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) || + (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) || + (vd.Is1D() && vm.Is1D()) || (vd.Is4H() && vm.Is1H()) || + (vd.Is8H() && vm.Is1H()) || (vd.Is1H() && vm.Is1H())); + VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)) || + (vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8))); + + Instr op = vop; + int index_num_bits; + if (vm.Is1D()) { + index_num_bits = 1; + } else if (vm.Is1S()) { + index_num_bits = 2; + } else { + index_num_bits = 3; + op = vop_half; + } + + if (vd.IsScalar()) { + op |= NEON_Q | NEONScalar; + } + + if (!vm.Is1H()) { + op |= FPFormat(vd); + } else if 
(vd.Is8H()) { + op |= NEON_Q; + } + + Emit(op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) || + (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) || + (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S())); + VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | + Rd(vd)); +} + + +void Assembler::NEONByElementL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp vop) { + VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) || + (vd.Is4S() && vn.Is8H() && vm.Is1H()) || + (vd.Is1S() && vn.Is1H() && vm.Is1H()) || + (vd.Is2D() && vn.Is2S() && vm.Is1S()) || + (vd.Is2D() && vn.Is4S() && vm.Is1S()) || + (vd.Is1D() && vn.Is1S() && vm.Is1S())); + + VIXL_ASSERT((vm.Is1H() && (vm.GetCode() < 16) && (vm_index < 8)) || + (vm.Is1S() && (vm_index < 4))); + + Instr format, op = vop; + int index_num_bits = vm.Is1H() ? 
3 : 2; + if (vd.IsScalar()) { + op |= NEONScalar | NEON_Q; + format = SFormat(vn); + } else { + format = VFormat(vn); + } + Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | + Rd(vd)); +} + + +void Assembler::sdot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) || + (vd.Is4S() && vn.Is16B() && vm.Is1S4B())); + + int index_num_bits = 2; + Emit(VFormat(vd) | NEON_SDOT_byelement | + ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::udot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kDotProduct)); + VIXL_ASSERT((vd.Is2S() && vn.Is8B() && vm.Is1S4B()) || + (vd.Is4S() && vn.Is16B() && vm.Is1S4B())); + + int index_num_bits = 2; + Emit(VFormat(vd) | NEON_UDOT_byelement | + ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +// clang-format off +#define NEON_BYELEMENT_LIST(V) \ + V(mul, NEON_MUL_byelement, vn.IsVector()) \ + V(mla, NEON_MLA_byelement, vn.IsVector()) \ + V(mls, NEON_MLS_byelement, vn.IsVector()) \ + V(sqdmulh, NEON_SQDMULH_byelement, true) \ + V(sqrdmulh, NEON_SQRDMULH_byelement, true) \ +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEONByElement(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_BYELEMENT_RDM_LIST(V) \ + V(sqrdmlah, NEON_SQRDMLAH_byelement) \ + V(sqrdmlsh, NEON_SQRDMLSH_byelement) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + 
int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kRDM)); \ + NEONByElement(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_RDM_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_FPBYELEMENT_LIST(V) \ + V(fmul, NEON_FMUL_byelement, NEON_FMUL_H_byelement) \ + V(fmla, NEON_FMLA_byelement, NEON_FMLA_H_byelement) \ + V(fmls, NEON_FMLS_byelement, NEON_FMLS_H_byelement) \ + V(fmulx, NEON_FMULX_byelement, NEON_FMULX_H_byelement) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, OP_H) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); \ + if (vd.IsLaneSizeH()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + NEONFPByElement(vd, vn, vm, vm_index, OP, OP_H); \ + } +NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_BYELEMENT_LONG_LIST(V) \ + V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \ + V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \ + V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \ + V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && 
vn.IsQ()) \ + V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \ + V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ()) +// clang-format on + + +#define DEFINE_ASM_FUNC(FN, OP, AS) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + VIXL_ASSERT(AS); \ + NEONByElementL(vd, vn, vm, vm_index, OP); \ + } +NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_BYELEMENT_FHM_LIST(V) \ + V(fmlal, NEON_FMLAL_H_byelement) \ + V(fmlal2, NEON_FMLAL2_H_byelement) \ + V(fmlsl, NEON_FMLSL_H_byelement) \ + V(fmlsl2, NEON_FMLSL2_H_byelement) +// clang-format on + + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, \ + CPUFeatures::kFP, \ + CPUFeatures::kNEONHalf, \ + CPUFeatures::kFHM)); \ + VIXL_ASSERT((vd.Is2S() && vn.Is2H()) || (vd.Is4S() && vn.Is4H())); \ + VIXL_ASSERT(vm.IsH()); \ + VIXL_ASSERT((vm_index >= 0) && (vm_index < 8)); \ + /* Vm itself can only be in the bottom 16 registers. 
*/ \ + VIXL_ASSERT(vm.GetCode() < 16); \ + Emit(FPFormat(vd) | OP | Rd(vd) | Rn(vn) | Rm(vm) | \ + ImmNEONHLM(vm_index, 3)); \ + } +NEON_BYELEMENT_FHM_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + +void Assembler::suqadd(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_SUQADD); +} + + +void Assembler::usqadd(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_USQADD); +} + + +void Assembler::abs(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_ABS); +} + + +void Assembler::sqabs(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_SQABS); +} + + +void Assembler::neg(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEON2RegMisc(vd, vn, NEON_NEG); +} + + +void Assembler::sqneg(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEON2RegMisc(vd, vn, NEON_SQNEG); +} + + +void Assembler::NEONXtn(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + Instr format, op = vop; + if (vd.IsScalar()) { + VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) || + (vd.Is1S() && vn.Is1D())); + op |= NEON_Q | NEONScalar; + format = SFormat(vd); + } else { + VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + format = VFormat(vd); + } + Emit(format | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::xtn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() && vd.IsD()); + NEONXtn(vd, vn, NEON_XTN); +} + + +void 
Assembler::xtn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_XTN); +} + + +void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsScalar() || vd.IsD()); + NEONXtn(vd, vn, NEON_SQXTN); +} + + +void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_SQXTN); +} + + +void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsScalar() || vd.IsD()); + NEONXtn(vd, vn, NEON_SQXTUN); +} + + +void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_SQXTUN); +} + + +void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsScalar() || vd.IsD()); + NEONXtn(vd, vn, NEON_UQXTN); +} + + +void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() && vd.IsQ()); + NEONXtn(vd, vn, NEON_UQXTN); +} + + +// NEON NOT and RBIT are distinguised by bit 22, the bottom bit of "size". 
+void Assembler::not_(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd)); +} + + +void Assembler::rbit(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd)); +} + + +void Assembler::ext(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B()); + VIXL_ASSERT((0 <= index) && (index < vd.GetLanes())); + Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd)); +} + + +void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + Instr q, scalar; + + // We support vn arguments of the form vn.VxT() or vn.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vn.GetLaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: + format = NEON_16B; + break; + case 2: + format = NEON_8H; + break; + case 4: + format = NEON_4S; + break; + default: + VIXL_ASSERT(lane_size == 8); + format = NEON_2D; + break; + } + + if (vd.IsScalar()) { + q = NEON_Q; + scalar = NEONScalar; + } else { + VIXL_ASSERT(!vd.Is1D()); + q = vd.IsD() ? 
0 : NEON_Q; + scalar = 0; + } + Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) | + Rd(vd)); +} + + +void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsScalar()); + dup(vd, vn, vn_index); +} + + +void Assembler::dup(const VRegister& vd, const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(!vd.Is1D()); + VIXL_ASSERT(vd.Is2D() == rn.IsX()); + int q = vd.IsD() ? 0 : NEON_Q; + Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd)); +} + + +void Assembler::ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vd.GetLaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: + format = NEON_16B; + break; + case 2: + format = NEON_8H; + break; + case 4: + format = NEON_4S; + break; + default: + VIXL_ASSERT(lane_size == 8); + format = NEON_2D; + break; + } + + VIXL_ASSERT( + (0 <= vd_index) && + (vd_index < LaneCountFromFormat(static_cast(format)))); + VIXL_ASSERT( + (0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast(format)))); + Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) | + ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd)); +} + + +void Assembler::mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + ins(vd, vd_index, vn, vn_index); +} + + +void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + // We support vd arguments of the form vd.VxT() or vd.T(), where x is the + // number of lanes, and T is b, h, s or d. 
+ int lane_size = vd.GetLaneSizeInBytes(); + NEONFormatField format; + switch (lane_size) { + case 1: + format = NEON_16B; + VIXL_ASSERT(rn.IsW()); + break; + case 2: + format = NEON_8H; + VIXL_ASSERT(rn.IsW()); + break; + case 4: + format = NEON_4S; + VIXL_ASSERT(rn.IsW()); + break; + default: + VIXL_ASSERT(lane_size == 8); + VIXL_ASSERT(rn.IsX()); + format = NEON_2D; + break; + } + + VIXL_ASSERT( + (0 <= vd_index) && + (vd_index < LaneCountFromFormat(static_cast(format)))); + Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd)); +} + + +void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + ins(vd, vd_index, rn); +} + + +void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + // We support vn arguments of the form vn.VxT() or vn.T(), where x is the + // number of lanes, and T is b, h, s or d. + int lane_size = vn.GetLaneSizeInBytes(); + NEONFormatField format; + Instr q = 0; + switch (lane_size) { + case 1: + format = NEON_16B; + VIXL_ASSERT(rd.IsW()); + break; + case 2: + format = NEON_8H; + VIXL_ASSERT(rd.IsW()); + break; + case 4: + format = NEON_4S; + VIXL_ASSERT(rd.IsW()); + break; + default: + VIXL_ASSERT(lane_size == 8); + VIXL_ASSERT(rd.IsX()); + format = NEON_2D; + q = NEON_Q; + break; + } + + VIXL_ASSERT( + (0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast(format)))); + Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); +} + + +void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.GetSizeInBytes() >= 4); + umov(rd, vn, vn_index); +} + + +void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + // We support vn arguments of the form vn.VxT() or vn.T(), where x is the + // number of lanes, and T is b, h, s. 
+ int lane_size = vn.GetLaneSizeInBytes(); + NEONFormatField format; + Instr q = 0; + VIXL_ASSERT(lane_size != 8); + switch (lane_size) { + case 1: + format = NEON_16B; + break; + case 2: + format = NEON_8H; + break; + default: + VIXL_ASSERT(lane_size == 4); + VIXL_ASSERT(rd.IsX()); + format = NEON_4S; + break; + } + q = rd.IsW() ? 0 : NEON_Q; + VIXL_ASSERT( + (0 <= vn_index) && + (vn_index < LaneCountFromFormat(static_cast(format)))); + Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); +} + + +void Assembler::cls(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd)); +} + + +void Assembler::clz(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd)); +} + + +void Assembler::cnt(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd)); +} + + +void Assembler::rev16(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B()); + Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd)); +} + + +void Assembler::rev32(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H()); + Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd)); +} + + +void Assembler::rev64(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(!vd.Is1D() && !vd.Is2D()); + Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | 
Rd(vd)); +} + + +void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is2S() || vd.Is4S()); + Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd)); +} + + +void Assembler::urecpe(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(AreSameFormat(vd, vn)); + VIXL_ASSERT(vd.Is2S() || vd.Is4S()); + Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONAddlp(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp op) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT((op == NEON_SADDLP) || (op == NEON_UADDLP) || + (op == NEON_SADALP) || (op == NEON_UADALP)); + + VIXL_ASSERT((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) || + (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::saddlp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_SADDLP); +} + + +void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_UADDLP); +} + + +void Assembler::sadalp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_SADALP); +} + + +void Assembler::uadalp(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAddlp(vd, vn, NEON_UADALP); +} + + +void Assembler::NEONAcrossLanesL(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op) { + VIXL_ASSERT((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) || + (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) || + (vn.Is4S() && vd.Is1D())); + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::saddlv(const VRegister& vd, const VRegister& vn) { + 
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAcrossLanesL(vd, vn, NEON_SADDLV); +} + + +void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONAcrossLanesL(vd, vn, NEON_UADDLV); +} + + +void Assembler::NEONAcrossLanes(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op, + Instr op_half) { + VIXL_ASSERT((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) || + (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) || + (vn.Is4S() && vd.Is1S())); + if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + if (vd.Is1H()) { + VIXL_ASSERT(op_half != 0); + Instr vop = op_half; + if (vn.Is8H()) { + vop |= NEON_Q; + } + Emit(vop | Rn(vn) | Rd(vd)); + } else { + Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); + } + } else { + Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); + } +} + +// clang-format off +#define NEON_ACROSSLANES_LIST(V) \ + V(addv, NEON_ADDV) \ + V(smaxv, NEON_SMAXV) \ + V(sminv, NEON_SMINV) \ + V(umaxv, NEON_UMAXV) \ + V(uminv, NEON_UMINV) +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); \ + NEONAcrossLanes(vd, vn, OP, 0); \ + } +NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +// clang-format off +#define NEON_ACROSSLANES_FP_LIST(V) \ + V(fmaxv, NEON_FMAXV, NEON_FMAXV_H) \ + V(fminv, NEON_FMINV, NEON_FMINV_H) \ + V(fmaxnmv, NEON_FMAXNMV, NEON_FMAXNMV_H) \ + V(fminnmv, NEON_FMINNMV, NEON_FMINNMV_H) \ +// clang-format on + +#define DEFINE_ASM_FUNC(FN, OP, OP_H) \ + void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kNEON)); \ + if (vd.Is1H()) VIXL_ASSERT(CPUHas(CPUFeatures::kNEONHalf)); \ + VIXL_ASSERT(vd.Is1S() || vd.Is1H()); \ + NEONAcrossLanes(vd, vn, OP, OP_H); \ + } +NEON_ACROSSLANES_FP_LIST(DEFINE_ASM_FUNC) +#undef DEFINE_ASM_FUNC + + +void Assembler::NEONPerm(const VRegister& vd, 
+ const VRegister& vn, + const VRegister& vm, + NEONPermOp op) { + VIXL_ASSERT(AreSameFormat(vd, vn, vm)); + VIXL_ASSERT(!vd.Is1D()); + Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); +} + + +void Assembler::trn1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_TRN1); +} + + +void Assembler::trn2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_TRN2); +} + + +void Assembler::uzp1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_UZP1); +} + + +void Assembler::uzp2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_UZP2); +} + + +void Assembler::zip1(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_ZIP1); +} + + +void Assembler::zip2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONPerm(vd, vn, vm, NEON_ZIP2); +} + + +void Assembler::NEONShiftImmediate(const VRegister& vd, + const VRegister& vn, + NEONShiftImmediateOp op, + int immh_immb) { + VIXL_ASSERT(AreSameFormat(vd, vn)); + Instr q, scalar; + if (vn.IsScalar()) { + q = NEON_Q; + scalar = NEONScalar; + } else { + q = vd.IsD() ? 
0 : NEON_Q; + scalar = 0; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONShiftLeftImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16); +} + + +void Assembler::NEONShiftRightImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits)); + NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16); +} + + +void Assembler::NEONShiftImmediateL(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + int laneSizeInBits = vn.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits)); + int immh_immb = (laneSizeInBits + shift) << 16; + + VIXL_ASSERT((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || + (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || + (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); + Instr q; + q = vn.IsD() ? 0 : NEON_Q; + Emit(q | op | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::NEONShiftImmediateN(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op) { + Instr q, scalar; + int laneSizeInBits = vd.GetLaneSizeInBits(); + VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits)); + int immh_immb = (2 * laneSizeInBits - shift) << 16; + + if (vn.IsScalar()) { + VIXL_ASSERT((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) || + (vd.Is1S() && vn.Is1D())); + q = NEON_Q; + scalar = NEONScalar; + } else { + VIXL_ASSERT((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || + (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || + (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); + scalar = 0; + q = vd.IsD() ? 
0 : NEON_Q; + } + Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); +} + + +void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL); +} + + +void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI); +} + + +void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm); +} + + +void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU); +} + + +void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm); +} + + +void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); +} + + +void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); +} + + +void Assembler::sxtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + sshll(vd, vn, 0); +} + + +void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + sshll2(vd, vn, 0); +} + + +void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsD()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); +} + + +void Assembler::ushll2(const VRegister& vd, 
const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsQ()); + NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); +} + + +void Assembler::uxtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + ushll(vd, vn, 0); +} + + +void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + ushll2(vd, vn, 0); +} + + +void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRI); +} + + +void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR); +} + + +void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USHR); +} + + +void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR); +} + + +void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR); +} + + +void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA); +} + + +void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_USRA); +} + + 
+void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA); +} + + +void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsVector() || vd.Is1D()); + NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA); +} + + +void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); +} + + +void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); +} + + +void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsD()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); +} + + +void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); +} + + +void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); +} + + +void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); +} + + +void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + 
NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); +} + + +void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); +} + + +void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); +} + + +void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); +} + + +void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); +} + + +void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); +} + + +void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); +} + + +void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); +} + + +void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); + NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); +} + + +void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, 
int shift) { + VIXL_ASSERT(CPUHas(CPUFeatures::kNEON)); + VIXL_ASSERT(vn.IsVector() && vd.IsQ()); + NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); +} + + +// Note: +// For all ToImm instructions below, a difference in case +// for the same letter indicates a negated bit. +// If b is 1, then B is 0. +uint32_t Assembler::FP16ToImm8(Float16 imm) { + VIXL_ASSERT(IsImmFP16(imm)); + // Half: aBbb.cdef.gh00.0000 (16 bits) + uint16_t bits = Float16ToRawbits(imm); + // bit7: a000.0000 + uint16_t bit7 = ((bits >> 15) & 0x1) << 7; + // bit6: 0b00.0000 + uint16_t bit6 = ((bits >> 13) & 0x1) << 6; + // bit5_to_0: 00cd.efgh + uint16_t bit5_to_0 = (bits >> 6) & 0x3f; + uint32_t result = static_cast(bit7 | bit6 | bit5_to_0); + return result; +} + + +Instr Assembler::ImmFP16(Float16 imm) { + return FP16ToImm8(imm) << ImmFP_offset; +} + + +uint32_t Assembler::FP32ToImm8(float imm) { + VIXL_ASSERT(IsImmFP32(imm)); + // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 + uint32_t bits = FloatToRawbits(imm); + // bit7: a000.0000 + uint32_t bit7 = ((bits >> 31) & 0x1) << 7; + // bit6: 0b00.0000 + uint32_t bit6 = ((bits >> 29) & 0x1) << 6; + // bit5_to_0: 00cd.efgh + uint32_t bit5_to_0 = (bits >> 19) & 0x3f; + + return bit7 | bit6 | bit5_to_0; +} + + +Instr Assembler::ImmFP32(float imm) { return FP32ToImm8(imm) << ImmFP_offset; } + + +uint32_t Assembler::FP64ToImm8(double imm) { + VIXL_ASSERT(IsImmFP64(imm)); + // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 + uint64_t bits = DoubleToRawbits(imm); + // bit7: a000.0000 + uint64_t bit7 = ((bits >> 63) & 0x1) << 7; + // bit6: 0b00.0000 + uint64_t bit6 = ((bits >> 61) & 0x1) << 6; + // bit5_to_0: 00cd.efgh + uint64_t bit5_to_0 = (bits >> 48) & 0x3f; + + return static_cast(bit7 | bit6 | bit5_to_0); +} + + +Instr Assembler::ImmFP64(double imm) { return FP64ToImm8(imm) << ImmFP_offset; } + + +// Code generation helpers. 
+void Assembler::MoveWide(const Register& rd, + uint64_t imm, + int shift, + MoveWideImmediateOp mov_op) { + // Ignore the top 32 bits of an immediate if we're moving to a W register. + if (rd.Is32Bits()) { + // Check that the top 32 bits are zero (a positive 32-bit number) or top + // 33 bits are one (a negative 32-bit number, sign extended to 64 bits). + VIXL_ASSERT(((imm >> kWRegSize) == 0) || + ((imm >> (kWRegSize - 1)) == 0x1ffffffff)); + imm &= kWRegMask; + } + + if (shift >= 0) { + // Explicit shift specified. + VIXL_ASSERT((shift == 0) || (shift == 16) || (shift == 32) || + (shift == 48)); + VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16)); + shift /= 16; + } else { + // Calculate a new immediate and shift combination to encode the immediate + // argument. + shift = 0; + if ((imm & 0xffffffffffff0000) == 0) { + // Nothing to do. + } else if ((imm & 0xffffffff0000ffff) == 0) { + imm >>= 16; + shift = 1; + } else if ((imm & 0xffff0000ffffffff) == 0) { + VIXL_ASSERT(rd.Is64Bits()); + imm >>= 32; + shift = 2; + } else if ((imm & 0x0000ffffffffffff) == 0) { + VIXL_ASSERT(rd.Is64Bits()); + imm >>= 48; + shift = 3; + } + } + + VIXL_ASSERT(IsUint16(imm)); + + Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | ImmMoveWide(imm) | + ShiftMoveWide(shift)); +} + + +void Assembler::AddSub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + if (operand.IsImmediate()) { + int64_t immediate = operand.GetImmediate(); + VIXL_ASSERT(IsImmAddSub(immediate)); + Instr dest_reg = (S == SetFlags) ? 
Rd(rd) : RdSP(rd); + Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | + ImmAddSub(static_cast(immediate)) | dest_reg | RnSP(rn)); + } else if (operand.IsShiftedRegister()) { + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits()); + VIXL_ASSERT(operand.GetShift() != ROR); + + // For instructions of the form: + // add/sub wsp, , [, LSL #0-3 ] + // add/sub , wsp, [, LSL #0-3 ] + // add/sub wsp, wsp, [, LSL #0-3 ] + // adds/subs , wsp, [, LSL #0-3 ] + // or their 64-bit register equivalents, convert the operand from shifted to + // extended register mode, and emit an add/sub extended instruction. + if (rn.IsSP() || rd.IsSP()) { + VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags))); + DataProcExtendedRegister(rd, + rn, + operand.ToExtendedRegister(), + S, + AddSubExtendedFixed | op); + } else { + DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op); + } + } else { + VIXL_ASSERT(operand.IsExtendedRegister()); + DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op); + } +} + + +void Assembler::AddSubWithCarry(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + VIXL_ASSERT(rd.GetSizeInBits() == operand.GetRegister().GetSizeInBits()); + VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0)); + Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) | Rn(rn) | Rd(rd)); +} + + +void Assembler::hlt(int code) { + VIXL_ASSERT(IsUint16(code)); + Emit(HLT | ImmException(code)); +} + + +void Assembler::brk(int code) { + VIXL_ASSERT(IsUint16(code)); + Emit(BRK | ImmException(code)); +} + + +void Assembler::svc(int code) { Emit(SVC | ImmException(code)); } + + +// TODO(all): The third parameter should be passed by reference but gcc 4.8.2 +// reports a bogus uninitialised warning then. 
+void Assembler::Logical(const Register& rd, + const Register& rn, + const Operand operand, + LogicalOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + if (operand.IsImmediate()) { + int64_t immediate = operand.GetImmediate(); + unsigned reg_size = rd.GetSizeInBits(); + + VIXL_ASSERT(immediate != 0); + VIXL_ASSERT(immediate != -1); + VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate)); + + // If the operation is NOT, invert the operation and immediate. + if ((op & NOT) == NOT) { + op = static_cast(op & ~NOT); + immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); + } + + unsigned n, imm_s, imm_r; + if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be encoded in the instruction. + LogicalImmediate(rd, rn, n, imm_s, imm_r, op); + } else { + // This case is handled in the macro assembler. + VIXL_UNREACHABLE(); + } + } else { + VIXL_ASSERT(operand.IsShiftedRegister()); + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits()); + Instr dp_op = static_cast(op | LogicalShiftedFixed); + DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op); + } +} + + +void Assembler::LogicalImmediate(const Register& rd, + const Register& rn, + unsigned n, + unsigned imm_s, + unsigned imm_r, + LogicalOp op) { + unsigned reg_size = rd.GetSizeInBits(); + Instr dest_reg = (op == ANDS) ? 
Rd(rd) : RdSP(rd); + Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) | + ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | + Rn(rn)); +} + + +void Assembler::ConditionalCompare(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op) { + Instr ccmpop; + if (operand.IsImmediate()) { + int64_t immediate = operand.GetImmediate(); + VIXL_ASSERT(IsImmConditionalCompare(immediate)); + ccmpop = ConditionalCompareImmediateFixed | op | + ImmCondCmp(static_cast(immediate)); + } else { + VIXL_ASSERT(operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0)); + ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.GetRegister()); + } + Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); +} + + +void Assembler::DataProcessing1Source(const Register& rd, + const Register& rn, + DataProcessing1SourceOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + Emit(SF(rn) | op | Rn(rn) | Rd(rd)); +} + + +void Assembler::FPDataProcessing1Source(const VRegister& vd, + const VRegister& vn, + FPDataProcessing1SourceOp op) { + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + Emit(FPType(vn) | op | Rn(vn) | Rd(vd)); +} + + +void Assembler::FPDataProcessing3Source(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va, + FPDataProcessing3SourceOp op) { + VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D()); + VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va)); + Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va)); +} + + +void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, + const int imm8, + const int left_shift, + NEONModifiedImmediateOp op) { + VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() || + vd.Is4S()); + VIXL_ASSERT((left_shift == 0) || (left_shift == 8) || (left_shift == 16) || + (left_shift == 24)); + VIXL_ASSERT(IsUint8(imm8)); + + int cmode_1, cmode_2, cmode_3; + if (vd.Is8B() || vd.Is16B()) { + 
VIXL_ASSERT(op == NEONModifiedImmediate_MOVI); + cmode_1 = 1; + cmode_2 = 1; + cmode_3 = 1; + } else { + cmode_1 = (left_shift >> 3) & 1; + cmode_2 = left_shift >> 4; + cmode_3 = 0; + if (vd.Is4H() || vd.Is8H()) { + VIXL_ASSERT((left_shift == 0) || (left_shift == 8)); + cmode_3 = 1; + } + } + int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1); + + int q = vd.IsQ() ? NEON_Q : 0; + + Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); +} + + +void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, + const int imm8, + const int shift_amount, + NEONModifiedImmediateOp op) { + VIXL_ASSERT(vd.Is2S() || vd.Is4S()); + VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16)); + VIXL_ASSERT(IsUint8(imm8)); + + int cmode_0 = (shift_amount >> 4) & 1; + int cmode = 0xc | cmode_0; + + int q = vd.IsQ() ? NEON_Q : 0; + + Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd)); +} + + +void Assembler::EmitShift(const Register& rd, + const Register& rn, + Shift shift, + unsigned shift_amount) { + switch (shift) { + case LSL: + lsl(rd, rn, shift_amount); + break; + case LSR: + lsr(rd, rn, shift_amount); + break; + case ASR: + asr(rd, rn, shift_amount); + break; + case ROR: + ror(rd, rn, shift_amount); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Assembler::EmitExtendShift(const Register& rd, + const Register& rn, + Extend extend, + unsigned left_shift) { + VIXL_ASSERT(rd.GetSizeInBits() >= rn.GetSizeInBits()); + unsigned reg_size = rd.GetSizeInBits(); + // Use the correct size of register. + Register rn_ = Register(rn.GetCode(), rd.GetSizeInBits()); + // Bits extracted are high_bit:0. + unsigned high_bit = (8 << (extend & 0x3)) - 1; + // Number of bits left in the result that are not introduced by the shift. 
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1); + + if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { + switch (extend) { + case UXTB: + case UXTH: + case UXTW: + ubfm(rd, rn_, non_shift_bits, high_bit); + break; + case SXTB: + case SXTH: + case SXTW: + sbfm(rd, rn_, non_shift_bits, high_bit); + break; + case UXTX: + case SXTX: { + VIXL_ASSERT(rn.GetSizeInBits() == kXRegSize); + // Nothing to extend. Just shift. + lsl(rd, rn_, left_shift); + break; + } + default: + VIXL_UNREACHABLE(); + } + } else { + // No need to extend as the extended bits would be shifted away. + lsl(rd, rn_, left_shift); + } +} + + +void Assembler::DataProcShiftedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op) { + VIXL_ASSERT(operand.IsShiftedRegister()); + VIXL_ASSERT(rn.Is64Bits() || + (rn.Is32Bits() && IsUint5(operand.GetShiftAmount()))); + Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.GetShift()) | + ImmDPShift(operand.GetShiftAmount()) | Rm(operand.GetRegister()) | + Rn(rn) | Rd(rd)); +} + + +void Assembler::DataProcExtendedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op) { + Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); + Emit(SF(rd) | op | Flags(S) | Rm(operand.GetRegister()) | + ExtendMode(operand.GetExtend()) | + ImmExtendShift(operand.GetShiftAmount()) | dest_reg | RnSP(rn)); +} + + +Instr Assembler::LoadStoreMemOperand(const MemOperand& addr, + unsigned access_size, + LoadStoreScalingOption option) { + Instr base = RnSP(addr.GetBaseRegister()); + int64_t offset = addr.GetOffset(); + + if (addr.IsImmediateOffset()) { + bool prefer_unscaled = + (option == PreferUnscaledOffset) || (option == RequireUnscaledOffset); + if (prefer_unscaled && IsImmLSUnscaled(offset)) { + // Use the unscaled addressing mode. 
+ return base | LoadStoreUnscaledOffsetFixed | + ImmLS(static_cast(offset)); + } + + if ((option != RequireUnscaledOffset) && + IsImmLSScaled(offset, access_size)) { + // Use the scaled addressing mode. + return base | LoadStoreUnsignedOffsetFixed | + ImmLSUnsigned(static_cast(offset) >> access_size); + } + + if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) { + // Use the unscaled addressing mode. + return base | LoadStoreUnscaledOffsetFixed | + ImmLS(static_cast(offset)); + } + } + + // All remaining addressing modes are register-offset, pre-indexed or + // post-indexed modes. + VIXL_ASSERT((option != RequireUnscaledOffset) && + (option != RequireScaledOffset)); + + if (addr.IsRegisterOffset()) { + Extend ext = addr.GetExtend(); + Shift shift = addr.GetShift(); + unsigned shift_amount = addr.GetShiftAmount(); + + // LSL is encoded in the option field as UXTX. + if (shift == LSL) { + ext = UXTX; + } + + // Shifts are encoded in one bit, indicating a left shift by the memory + // access size. + VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size)); + return base | LoadStoreRegisterOffsetFixed | Rm(addr.GetRegisterOffset()) | + ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0); + } + + if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) { + return base | LoadStorePreIndexFixed | ImmLS(static_cast(offset)); + } + + if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) { + return base | LoadStorePostIndexFixed | ImmLS(static_cast(offset)); + } + + // If this point is reached, the MemOperand (addr) cannot be encoded. 
+ VIXL_UNREACHABLE(); + return 0; +} + + +void Assembler::LoadStore(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op, + LoadStoreScalingOption option) { + VIXL_ASSERT(CPUHas(rt)); + Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option)); +} + +void Assembler::LoadStorePAC(const Register& xt, + const MemOperand& addr, + LoadStorePACOp op) { + VIXL_ASSERT(xt.Is64Bits()); + VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsPreIndex()); + + Instr pac_op = op; + if (addr.IsPreIndex()) { + pac_op |= LoadStorePACPreBit; + } + + Instr base = RnSP(addr.GetBaseRegister()); + int64_t offset = addr.GetOffset(); + + Emit(pac_op | Rt(xt) | base | ImmLSPAC(static_cast(offset))); +} + + +void Assembler::Prefetch(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option) { + VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset()); + + Instr prfop = ImmPrefetchOperation(op); + Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option)); +} + + +bool Assembler::IsImmAddSub(int64_t immediate) { + return IsUint12(immediate) || + (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0)); +} + + +bool Assembler::IsImmConditionalCompare(int64_t immediate) { + return IsUint5(immediate); +} + + +bool Assembler::IsImmFP16(Float16 imm) { + // Valid values will have the form: + // aBbb.cdef.gh00.000 + uint16_t bits = Float16ToRawbits(imm); + // bits[6..0] are cleared. + if ((bits & 0x3f) != 0) { + return false; + } + + // bits[13..12] are all set or all cleared. + uint16_t b_pattern = (bits >> 12) & 0x03; + if (b_pattern != 0 && b_pattern != 0x03) { + return false; + } + + // bit[15] and bit[14] are opposite. + if (((bits ^ (bits << 1)) & 0x4000) == 0) { + return false; + } + + return true; +} + + +bool Assembler::IsImmFP32(float imm) { + // Valid values will have the form: + // aBbb.bbbc.defg.h000.0000.0000.0000.0000 + uint32_t bits = FloatToRawbits(imm); + // bits[19..0] are cleared. 
+ if ((bits & 0x7ffff) != 0) { + return false; + } + + // bits[29..25] are all set or all cleared. + uint32_t b_pattern = (bits >> 16) & 0x3e00; + if (b_pattern != 0 && b_pattern != 0x3e00) { + return false; + } + + // bit[30] and bit[29] are opposite. + if (((bits ^ (bits << 1)) & 0x40000000) == 0) { + return false; + } + + return true; +} + + +bool Assembler::IsImmFP64(double imm) { + // Valid values will have the form: + // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 + uint64_t bits = DoubleToRawbits(imm); + // bits[47..0] are cleared. + if ((bits & 0x0000ffffffffffff) != 0) { + return false; + } + + // bits[61..54] are all set or all cleared. + uint32_t b_pattern = (bits >> 48) & 0x3fc0; + if ((b_pattern != 0) && (b_pattern != 0x3fc0)) { + return false; + } + + // bit[62] and bit[61] are opposite. + if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) { + return false; + } + + return true; +} + + +bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) { + VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2); + return IsMultiple(offset, 1 << access_size) && + IsInt7(offset / (1 << access_size)); +} + + +bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) { + VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2); + return IsMultiple(offset, 1 << access_size) && + IsUint12(offset / (1 << access_size)); +} + + +bool Assembler::IsImmLSUnscaled(int64_t offset) { return IsInt9(offset); } + + +// The movn instruction can generate immediates containing an arbitrary 16-bit +// value, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff. +bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) { + return IsImmMovz(~imm, reg_size); +} + + +// The movz instruction can generate immediates containing an arbitrary 16-bit +// value, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. 
+bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) { + VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); + return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); +} + + +// Test if a given value can be encoded in the immediate field of a logical +// instruction. +// If it can be encoded, the function returns true, and values pointed to by n, +// imm_s and imm_r are updated with immediates encoded in the format required +// by the corresponding fields in the logical instruction. +// If it can not be encoded, the function returns false, and the values pointed +// to by n, imm_s and imm_r are undefined. +bool Assembler::IsImmLogical(uint64_t value, + unsigned width, + unsigned* n, + unsigned* imm_s, + unsigned* imm_r) { + VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize)); + + bool negate = false; + + // Logical immediates are encoded using parameters n, imm_s and imm_r using + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 bits + // are set. The pattern is rotated right by R, and repeated across a 32 or + // 64-bit value, depending on destination register width. + // + // Put another way: the basic format of a logical immediate is a single + // contiguous stretch of 1 bits, repeated across the whole word at intervals + // given by a power of 2. 
To identify them quickly, we first locate the + // lowest stretch of 1 bits, then the next 1 bit above that; that combination + // is different for every logical immediate, so it gives us all the + // information we need to identify the only logical immediate that our input + // could be, and then we simply check if that's the value we actually have. + // + // (The rotation parameter does give the possibility of the stretch of 1 bits + // going 'round the end' of the word. To deal with that, we observe that in + // any situation where that happens the bitwise NOT of the value is also a + // valid logical immediate. So we simply invert the input whenever its low bit + // is set, and then we know that the rotated case can't arise.) + + if (value & 1) { + // If the low bit is 1, negate the value, and set a flag to remember that we + // did (so that we can adjust the return values appropriately). + negate = true; + value = ~value; + } + + if (width == kWRegSize) { + // To handle 32-bit logical immediates, the very easiest thing is to repeat + // the input value twice to make a 64-bit word. The correct encoding of that + // as a logical immediate will also be the correct encoding of the 32-bit + // value. + + // Avoid making the assumption that the most-significant 32 bits are zero by + // shifting the value left and duplicating it. + value <<= kWRegSize; + value |= value >> kWRegSize; + } + + // The basic analysis idea: imagine our input word looks like this. + // + // 0011111000111110001111100011111000111110001111100011111000111110 + // c b a + // |<--d-->| + // + // We find the lowest set bit (as an actual power-of-2 value, not its index) + // and call it a. Then we add a to our original number, which wipes out the + // bottommost stretch of set bits and replaces it with a 1 carried into the + // next zero bit. 
Then we look for the new lowest set bit, which is in + // position b, and subtract it, so now our number is just like the original + // but with the lowest stretch of set bits completely gone. Now we find the + // lowest set bit again, which is position c in the diagram above. Then we'll + // measure the distance d between bit positions a and c (using CLZ), and that + // tells us that the only valid logical immediate that could possibly be equal + // to this number is the one in which a stretch of bits running from a to just + // below b is replicated every d bits. + uint64_t a = LowestSetBit(value); + uint64_t value_plus_a = value + a; + uint64_t b = LowestSetBit(value_plus_a); + uint64_t value_plus_a_minus_b = value_plus_a - b; + uint64_t c = LowestSetBit(value_plus_a_minus_b); + + int d, clz_a, out_n; + uint64_t mask; + + if (c != 0) { + // The general case, in which there is more than one stretch of set bits. + // Compute the repeat distance d, and set up a bitmask covering the basic + // unit of repetition (i.e. a word with the bottom d bits set). Also, in all + // of these cases the N bit of the output will be zero. + clz_a = CountLeadingZeros(a, kXRegSize); + int clz_c = CountLeadingZeros(c, kXRegSize); + d = clz_a - clz_c; + mask = ((UINT64_C(1) << d) - 1); + out_n = 0; + } else { + // Handle degenerate cases. + // + // If any of those 'find lowest set bit' operations didn't find a set bit at + // all, then the word will have been zero thereafter, so in particular the + // last lowest_set_bit operation will have returned zero. So we can test for + // all the special case conditions in one go by seeing if c is zero. + if (a == 0) { + // The input was zero (or all 1 bits, which will come to here too after we + // inverted it at the start of the function), for which we just return + // false. 
+ return false; + } else { + // Otherwise, if c was zero but a was not, then there's just one stretch + // of set bits in our word, meaning that we have the trivial case of + // d == 64 and only one 'repetition'. Set up all the same variables as in + // the general case above, and set the N bit in the output. + clz_a = CountLeadingZeros(a, kXRegSize); + d = 64; + mask = ~UINT64_C(0); + out_n = 1; + } + } + + // If the repeat period d is not a power of two, it can't be encoded. + if (!IsPowerOf2(d)) { + return false; + } + + if (((b - a) & ~mask) != 0) { + // If the bit stretch (b - a) does not fit within the mask derived from the + // repeat period, then fail. + return false; + } + + // The only possible option is b - a repeated every d bits. Now we're going to + // actually construct the valid logical immediate derived from that + // specification, and see if it equals our original input. + // + // To repeat a value every d bits, we multiply it by a number of the form + // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can + // be derived using a table lookup on CLZ(d). + static const uint64_t multipliers[] = { + 0x0000000000000001UL, + 0x0000000100000001UL, + 0x0001000100010001UL, + 0x0101010101010101UL, + 0x1111111111111111UL, + 0x5555555555555555UL, + }; + uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57]; + uint64_t candidate = (b - a) * multiplier; + + if (value != candidate) { + // The candidate pattern doesn't match our input value, so fail. + return false; + } + + // We have a match! This is a valid logical immediate, so now we have to + // construct the bits and pieces of the instruction encoding that generates + // it. + + // Count the set bits in our basic stretch. The special case of clz(0) == -1 + // makes the answer come out right for stretches that reach the very top of + // the word (e.g. numbers like 0xffffc00000000000). + int clz_b = (b == 0) ? 
-1 : CountLeadingZeros(b, kXRegSize); + int s = clz_a - clz_b; + + // Decide how many bits to rotate right by, to put the low bit of that basic + // stretch in position a. + int r; + if (negate) { + // If we inverted the input right at the start of this function, here's + // where we compensate: the number of set bits becomes the number of clear + // bits, and the rotation count is based on position b rather than position + // a (since b is the location of the 'lowest' 1 bit after inversion). + s = d - s; + r = (clz_b + 1) & (d - 1); + } else { + r = (clz_a + 1) & (d - 1); + } + + // Now we're done, except for having to encode the S output in such a way that + // it gives both the number of set bits and the length of the repeated + // segment. The s field is encoded like this: + // + // imms size S + // ssssss 64 UInt(ssssss) + // 0sssss 32 UInt(sssss) + // 10ssss 16 UInt(ssss) + // 110sss 8 UInt(sss) + // 1110ss 4 UInt(ss) + // 11110s 2 UInt(s) + // + // So we 'or' (2 * -d) with our computed s to form imms. + if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) { + *n = out_n; + *imm_s = ((2 * -d) | (s - 1)) & 0x3f; + *imm_r = r; + } + + return true; +} + + +LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) { + VIXL_ASSERT(rt.IsValid()); + if (rt.IsRegister()) { + return rt.Is64Bits() ? LDR_x : LDR_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.GetSizeInBits()) { + case kBRegSize: + return LDR_b; + case kHRegSize: + return LDR_h; + case kSRegSize: + return LDR_s; + case kDRegSize: + return LDR_d; + default: + VIXL_ASSERT(rt.IsQ()); + return LDR_q; + } + } +} + + +LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) { + VIXL_ASSERT(rt.IsValid()); + if (rt.IsRegister()) { + return rt.Is64Bits() ? 
STR_x : STR_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.GetSizeInBits()) { + case kBRegSize: + return STR_b; + case kHRegSize: + return STR_h; + case kSRegSize: + return STR_s; + case kDRegSize: + return STR_d; + default: + VIXL_ASSERT(rt.IsQ()); + return STR_q; + } + } +} + + +LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt, + const CPURegister& rt2) { + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + USE(rt2); + if (rt.IsRegister()) { + return rt.Is64Bits() ? STP_x : STP_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.GetSizeInBytes()) { + case kSRegSizeInBytes: + return STP_s; + case kDRegSizeInBytes: + return STP_d; + default: + VIXL_ASSERT(rt.IsQ()); + return STP_q; + } + } +} + + +LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt, + const CPURegister& rt2) { + VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w); + return static_cast(StorePairOpFor(rt, rt2) | + LoadStorePairLBit); +} + + +LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2) { + VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + USE(rt2); + if (rt.IsRegister()) { + return rt.Is64Bits() ? STNP_x : STNP_w; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.GetSizeInBytes()) { + case kSRegSizeInBytes: + return STNP_s; + case kDRegSizeInBytes: + return STNP_d; + default: + VIXL_ASSERT(rt.IsQ()); + return STNP_q; + } + } +} + + +LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2) { + VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w); + return static_cast( + StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit); +} + + +LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) { + if (rt.IsRegister()) { + return rt.IsX() ? 
LDR_x_lit : LDR_w_lit; + } else { + VIXL_ASSERT(rt.IsVRegister()); + switch (rt.GetSizeInBytes()) { + case kSRegSizeInBytes: + return LDR_s_lit; + case kDRegSizeInBytes: + return LDR_d_lit; + default: + VIXL_ASSERT(rt.IsQ()); + return LDR_q_lit; + } + } +} + + +bool Assembler::CPUHas(const CPURegister& rt) const { + // Core registers are available without any particular CPU features. + if (rt.IsRegister()) return true; + VIXL_ASSERT(rt.IsVRegister()); + // The architecture does not allow FP and NEON to be implemented separately, + // but we can crudely categorise them based on register size, since FP only + // uses D, S and (occasionally) H registers. + if (rt.IsH() || rt.IsS() || rt.IsD()) { + return CPUHas(CPUFeatures::kFP) || CPUHas(CPUFeatures::kNEON); + } + VIXL_ASSERT(rt.IsB() || rt.IsQ()); + return CPUHas(CPUFeatures::kNEON); +} + + +bool Assembler::CPUHas(const CPURegister& rt, const CPURegister& rt2) const { + // This is currently only used for loads and stores, where rt and rt2 must + // have the same size and type. We could extend this to cover other cases if + // necessary, but for now we can avoid checking both registers. 
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2)); + USE(rt2); + return CPUHas(rt); +} + + +bool AreAliased(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4, + const CPURegister& reg5, + const CPURegister& reg6, + const CPURegister& reg7, + const CPURegister& reg8) { + int number_of_valid_regs = 0; + int number_of_valid_fpregs = 0; + + RegList unique_regs = 0; + RegList unique_fpregs = 0; + + const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8}; + + for (size_t i = 0; i < ArrayLength(regs); i++) { + if (regs[i].IsRegister()) { + number_of_valid_regs++; + unique_regs |= regs[i].GetBit(); + } else if (regs[i].IsVRegister()) { + number_of_valid_fpregs++; + unique_fpregs |= regs[i].GetBit(); + } else { + VIXL_ASSERT(!regs[i].IsValid()); + } + } + + int number_of_unique_regs = CountSetBits(unique_regs); + int number_of_unique_fpregs = CountSetBits(unique_fpregs); + + VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs); + VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs); + + return (number_of_valid_regs != number_of_unique_regs) || + (number_of_valid_fpregs != number_of_unique_fpregs); +} + + +bool AreSameSizeAndType(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4, + const CPURegister& reg5, + const CPURegister& reg6, + const CPURegister& reg7, + const CPURegister& reg8) { + VIXL_ASSERT(reg1.IsValid()); + bool match = true; + match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1); + match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1); + match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1); + match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1); + match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1); + match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1); + match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1); + return match; +} + +bool AreEven(const CPURegister& reg1, + const CPURegister& reg2, + 
const CPURegister& reg3, + const CPURegister& reg4, + const CPURegister& reg5, + const CPURegister& reg6, + const CPURegister& reg7, + const CPURegister& reg8) { + VIXL_ASSERT(reg1.IsValid()); + bool even = (reg1.GetCode() % 2) == 0; + even &= !reg2.IsValid() || ((reg2.GetCode() % 2) == 0); + even &= !reg3.IsValid() || ((reg3.GetCode() % 2) == 0); + even &= !reg4.IsValid() || ((reg4.GetCode() % 2) == 0); + even &= !reg5.IsValid() || ((reg5.GetCode() % 2) == 0); + even &= !reg6.IsValid() || ((reg6.GetCode() % 2) == 0); + even &= !reg7.IsValid() || ((reg7.GetCode() % 2) == 0); + even &= !reg8.IsValid() || ((reg8.GetCode() % 2) == 0); + return even; +} + + +bool AreConsecutive(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3, + const CPURegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + + if (!reg2.IsValid()) { + return true; + } else if (reg2.GetCode() != ((reg1.GetCode() + 1) % kNumberOfRegisters)) { + return false; + } + + if (!reg3.IsValid()) { + return true; + } else if (reg3.GetCode() != ((reg2.GetCode() + 1) % kNumberOfRegisters)) { + return false; + } + + if (!reg4.IsValid()) { + return true; + } else if (reg4.GetCode() != ((reg3.GetCode() + 1) % kNumberOfRegisters)) { + return false; + } + + return true; +} + + +bool AreSameFormat(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3, + const VRegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + bool match = true; + match &= !reg2.IsValid() || reg2.IsSameFormat(reg1); + match &= !reg3.IsValid() || reg3.IsSameFormat(reg1); + match &= !reg4.IsValid() || reg4.IsSameFormat(reg1); + return match; +} + + +bool AreConsecutive(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3, + const VRegister& reg4) { + VIXL_ASSERT(reg1.IsValid()); + + if (!reg2.IsValid()) { + return true; + } else if (reg2.GetCode() != ((reg1.GetCode() + 1) % kNumberOfVRegisters)) { + return false; + } + + if (!reg3.IsValid()) { + return true; + } else if (reg3.GetCode() != 
((reg2.GetCode() + 1) % kNumberOfVRegisters)) { + return false; + } + + if (!reg4.IsValid()) { + return true; + } else if (reg4.GetCode() != ((reg3.GetCode() + 1) % kNumberOfVRegisters)) { + return false; + } + + return true; +} +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.h new file mode 100644 index 00000000..ecdba12c --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/assembler-aarch64.h @@ -0,0 +1,4548 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_ASSEMBLER_AARCH64_H_ +#define VIXL_AARCH64_ASSEMBLER_AARCH64_H_ + +#include "../assembler-base-vixl.h" +#include "../code-generation-scopes-vixl.h" +#include "../cpu-features.h" +#include "../globals-vixl.h" +#include "../invalset-vixl.h" +#include "../utils-vixl.h" +#include "operands-aarch64.h" + +namespace vixl { +namespace aarch64 { + +class LabelTestHelper; // Forward declaration. + + +class Label { + public: + Label() : location_(kLocationUnbound) {} + ~Label() { + // All links to a label must have been resolved before it is destructed. + VIXL_ASSERT(!IsLinked()); + } + + bool IsBound() const { return location_ >= 0; } + bool IsLinked() const { return !links_.empty(); } + + ptrdiff_t GetLocation() const { return location_; } + VIXL_DEPRECATED("GetLocation", ptrdiff_t location() const) { + return GetLocation(); + } + + static const int kNPreallocatedLinks = 4; + static const ptrdiff_t kInvalidLinkKey = PTRDIFF_MAX; + static const size_t kReclaimFrom = 512; + static const size_t kReclaimFactor = 2; + + typedef InvalSet + LinksSetBase; + typedef InvalSetIterator LabelLinksIteratorBase; + + private: + class LinksSet : public LinksSetBase { + public: + LinksSet() : LinksSetBase() {} + }; + + // Allows iterating over the links of a label. The behaviour is undefined if + // the list of links is modified in any way while iterating. 
+ class LabelLinksIterator : public LabelLinksIteratorBase { + public: + explicit LabelLinksIterator(Label* label) + : LabelLinksIteratorBase(&label->links_) {} + + // TODO: Remove these and use the STL-like interface instead. + using LabelLinksIteratorBase::Advance; + using LabelLinksIteratorBase::Current; + }; + + void Bind(ptrdiff_t location) { + // Labels can only be bound once. + VIXL_ASSERT(!IsBound()); + location_ = location; + } + + void AddLink(ptrdiff_t instruction) { + // If a label is bound, the assembler already has the information it needs + // to write the instruction, so there is no need to add it to links_. + VIXL_ASSERT(!IsBound()); + links_.insert(instruction); + } + + void DeleteLink(ptrdiff_t instruction) { links_.erase(instruction); } + + void ClearAllLinks() { links_.clear(); } + + // TODO: The comment below considers average case complexity for our + // usual use-cases. The elements of interest are: + // - Branches to a label are emitted in order: branch instructions to a label + // are generated at an offset in the code generation buffer greater than any + // other branch to that same label already generated. As an example, this can + // be broken when an instruction is patched to become a branch. Note that the + // code will still work, but the complexity considerations below may locally + // not apply any more. + // - Veneers are generated in order: for multiple branches of the same type + // branching to the same unbound label going out of range, veneers are + // generated in growing order of the branch instruction offset from the start + // of the buffer. + // + // When creating a veneer for a branch going out of range, the link for this + // branch needs to be removed from this `links_`. Since all branches are + // tracked in one underlying InvalSet, the complexity for this deletion is the + // same as for finding the element, ie. O(n), where n is the number of links + // in the set. 
+ // This could be reduced to O(1) by using the same trick as used when tracking + // branch information for veneers: split the container to use one set per type + // of branch. With that setup, when a veneer is created and the link needs to + // be deleted, if the two points above hold, it must be the minimum element of + // the set for its type of branch, and that minimum element will be accessible + // in O(1). + + // The offsets of the instructions that have linked to this label. + LinksSet links_; + // The label location. + ptrdiff_t location_; + + static const ptrdiff_t kLocationUnbound = -1; + +// It is not safe to copy labels, so disable the copy constructor and operator +// by declaring them private (without an implementation). +#if __cplusplus >= 201103L + Label(const Label&) = delete; + void operator=(const Label&) = delete; +#else + Label(const Label&); + void operator=(const Label&); +#endif + + // The Assembler class is responsible for binding and linking labels, since + // the stored offsets need to be consistent with the Assembler's buffer. + friend class Assembler; + // The MacroAssembler and VeneerPool handle resolution of branches to distant + // targets. + friend class MacroAssembler; + friend class VeneerPool; +}; + + +class Assembler; +class LiteralPool; + +// A literal is a 32-bit or 64-bit piece of data stored in the instruction +// stream and loaded through a pc relative load. The same literal can be +// referred to by multiple instructions but a literal can only reside at one +// place in memory. A literal can be used by a load before or after being +// placed in memory. +// +// Internally an offset of 0 is associated with a literal which has been +// neither used nor placed. Then two possibilities arise: +// 1) the label is placed, the offset (stored as offset + 1) is used to +// resolve any subsequent load using the label. 
+// 2) the label is not placed and offset is the offset of the last load using +// the literal (stored as -offset -1). If multiple loads refer to this +// literal then the last load holds the offset of the preceding load and +// all loads form a chain. Once the offset is placed all the loads in the +// chain are resolved and future loads fall back to possibility 1. +class RawLiteral { + public: + enum DeletionPolicy { + kDeletedOnPlacementByPool, + kDeletedOnPoolDestruction, + kManuallyDeleted + }; + + RawLiteral(size_t size, + LiteralPool* literal_pool, + DeletionPolicy deletion_policy = kManuallyDeleted); + + // The literal pool only sees and deletes `RawLiteral*` pointers, but they are + // actually pointing to `Literal` objects. + virtual ~RawLiteral() {} + + size_t GetSize() const { + VIXL_STATIC_ASSERT(kDRegSizeInBytes == kXRegSizeInBytes); + VIXL_STATIC_ASSERT(kSRegSizeInBytes == kWRegSizeInBytes); + VIXL_ASSERT((size_ == kXRegSizeInBytes) || (size_ == kWRegSizeInBytes) || + (size_ == kQRegSizeInBytes)); + return size_; + } + VIXL_DEPRECATED("GetSize", size_t size()) { return GetSize(); } + + uint64_t GetRawValue128Low64() const { + VIXL_ASSERT(size_ == kQRegSizeInBytes); + return low64_; + } + VIXL_DEPRECATED("GetRawValue128Low64", uint64_t raw_value128_low64()) { + return GetRawValue128Low64(); + } + + uint64_t GetRawValue128High64() const { + VIXL_ASSERT(size_ == kQRegSizeInBytes); + return high64_; + } + VIXL_DEPRECATED("GetRawValue128High64", uint64_t raw_value128_high64()) { + return GetRawValue128High64(); + } + + uint64_t GetRawValue64() const { + VIXL_ASSERT(size_ == kXRegSizeInBytes); + VIXL_ASSERT(high64_ == 0); + return low64_; + } + VIXL_DEPRECATED("GetRawValue64", uint64_t raw_value64()) { + return GetRawValue64(); + } + + uint32_t GetRawValue32() const { + VIXL_ASSERT(size_ == kWRegSizeInBytes); + VIXL_ASSERT(high64_ == 0); + VIXL_ASSERT(IsUint32(low64_) || IsInt32(low64_)); + return static_cast(low64_); + } + VIXL_DEPRECATED("GetRawValue32", 
uint32_t raw_value32()) { + return GetRawValue32(); + } + + bool IsUsed() const { return offset_ < 0; } + bool IsPlaced() const { return offset_ > 0; } + + LiteralPool* GetLiteralPool() const { return literal_pool_; } + + ptrdiff_t GetOffset() const { + VIXL_ASSERT(IsPlaced()); + return offset_ - 1; + } + VIXL_DEPRECATED("GetOffset", ptrdiff_t offset()) { return GetOffset(); } + + protected: + void SetOffset(ptrdiff_t offset) { + VIXL_ASSERT(offset >= 0); + VIXL_ASSERT(IsWordAligned(offset)); + VIXL_ASSERT(!IsPlaced()); + offset_ = offset + 1; + } + VIXL_DEPRECATED("SetOffset", void set_offset(ptrdiff_t offset)) { + SetOffset(offset); + } + + ptrdiff_t GetLastUse() const { + VIXL_ASSERT(IsUsed()); + return -offset_ - 1; + } + VIXL_DEPRECATED("GetLastUse", ptrdiff_t last_use()) { return GetLastUse(); } + + void SetLastUse(ptrdiff_t offset) { + VIXL_ASSERT(offset >= 0); + VIXL_ASSERT(IsWordAligned(offset)); + VIXL_ASSERT(!IsPlaced()); + offset_ = -offset - 1; + } + VIXL_DEPRECATED("SetLastUse", void set_last_use(ptrdiff_t offset)) { + SetLastUse(offset); + } + + size_t size_; + ptrdiff_t offset_; + uint64_t low64_; + uint64_t high64_; + + private: + LiteralPool* literal_pool_; + DeletionPolicy deletion_policy_; + + friend class Assembler; + friend class LiteralPool; +}; + + +template +class Literal : public RawLiteral { + public: + explicit Literal(T value, + LiteralPool* literal_pool = NULL, + RawLiteral::DeletionPolicy ownership = kManuallyDeleted) + : RawLiteral(sizeof(value), literal_pool, ownership) { + VIXL_STATIC_ASSERT(sizeof(value) <= kXRegSizeInBytes); + UpdateValue(value); + } + + Literal(T high64, + T low64, + LiteralPool* literal_pool = NULL, + RawLiteral::DeletionPolicy ownership = kManuallyDeleted) + : RawLiteral(kQRegSizeInBytes, literal_pool, ownership) { + VIXL_STATIC_ASSERT(sizeof(low64) == (kQRegSizeInBytes / 2)); + UpdateValue(high64, low64); + } + + virtual ~Literal() {} + + // Update the value of this literal, if necessary by rewriting the 
value in + // the pool. + // If the literal has already been placed in a literal pool, the address of + // the start of the code buffer must be provided, as the literal only knows it + // offset from there. This also allows patching the value after the code has + // been moved in memory. + void UpdateValue(T new_value, uint8_t* code_buffer = NULL) { + VIXL_ASSERT(sizeof(new_value) == size_); + memcpy(&low64_, &new_value, sizeof(new_value)); + if (IsPlaced()) { + VIXL_ASSERT(code_buffer != NULL); + RewriteValueInCode(code_buffer); + } + } + + void UpdateValue(T high64, T low64, uint8_t* code_buffer = NULL) { + VIXL_ASSERT(sizeof(low64) == size_ / 2); + memcpy(&low64_, &low64, sizeof(low64)); + memcpy(&high64_, &high64, sizeof(high64)); + if (IsPlaced()) { + VIXL_ASSERT(code_buffer != NULL); + RewriteValueInCode(code_buffer); + } + } + + void UpdateValue(T new_value, const Assembler* assembler); + void UpdateValue(T high64, T low64, const Assembler* assembler); + + private: + void RewriteValueInCode(uint8_t* code_buffer) { + VIXL_ASSERT(IsPlaced()); + VIXL_STATIC_ASSERT(sizeof(T) <= kXRegSizeInBytes); + switch (GetSize()) { + case kSRegSizeInBytes: + *reinterpret_cast(code_buffer + GetOffset()) = + GetRawValue32(); + break; + case kDRegSizeInBytes: + *reinterpret_cast(code_buffer + GetOffset()) = + GetRawValue64(); + break; + default: + VIXL_ASSERT(GetSize() == kQRegSizeInBytes); + uint64_t* base_address = + reinterpret_cast(code_buffer + GetOffset()); + *base_address = GetRawValue128Low64(); + *(base_address + 1) = GetRawValue128High64(); + } + } +}; + + +// Control whether or not position-independent code should be emitted. +enum PositionIndependentCodeOption { + // All code generated will be position-independent; all branches and + // references to labels generated with the Label class will use PC-relative + // addressing. + PositionIndependentCode, + + // Allow VIXL to generate code that refers to absolute addresses. 
With this + // option, it will not be possible to copy the code buffer and run it from a + // different address; code must be generated in its final location. + PositionDependentCode, + + // Allow VIXL to assume that the bottom 12 bits of the address will be + // constant, but that the top 48 bits may change. This allows `adrp` to + // function in systems which copy code between pages, but otherwise maintain + // 4KB page alignment. + PageOffsetDependentCode +}; + + +// Control how scaled- and unscaled-offset loads and stores are generated. +enum LoadStoreScalingOption { + // Prefer scaled-immediate-offset instructions, but emit unscaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferScaledOffset, + + // Prefer unscaled-immediate-offset instructions, but emit scaled-offset, + // register-offset, pre-index or post-index instructions if necessary. + PreferUnscaledOffset, + + // Require scaled-immediate-offset instructions. + RequireScaledOffset, + + // Require unscaled-immediate-offset instructions. + RequireUnscaledOffset +}; + + +// Assembler. +class Assembler : public vixl::internal::AssemblerBase { + public: + explicit Assembler( + PositionIndependentCodeOption pic = PositionIndependentCode) + : pic_(pic), cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} + explicit Assembler( + size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode) + : AssemblerBase(capacity), + pic_(pic), + cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} + Assembler(byte* buffer, + size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode) + : AssemblerBase(buffer, capacity), + pic_(pic), + cpu_features_(CPUFeatures::AArch64LegacyBaseline()) {} + + // Upon destruction, the code will assert that one of the following is true: + // * The Assembler object has not been used. + // * Nothing has been emitted since the last Reset() call. + // * Nothing has been emitted since the last FinalizeCode() call. 
+ ~Assembler() {} + + // System functions. + + // Start generating code from the beginning of the buffer, discarding any code + // and data that has already been emitted into the buffer. + void Reset(); + + // Label. + // Bind a label to the current PC. + void bind(Label* label); + + // Bind a label to a specified offset from the start of the buffer. + void BindToOffset(Label* label, ptrdiff_t offset); + + // Place a literal at the current PC. + void place(RawLiteral* literal); + + VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) { + return GetCursorOffset(); + } + + VIXL_DEPRECATED("GetBuffer().GetCapacity()", + ptrdiff_t GetBufferEndOffset() const) { + return static_cast(GetBuffer().GetCapacity()); + } + VIXL_DEPRECATED("GetBuffer().GetCapacity()", + ptrdiff_t BufferEndOffset() const) { + return GetBuffer().GetCapacity(); + } + + // Return the address of a bound label. + template + T GetLabelAddress(const Label* label) const { + VIXL_ASSERT(label->IsBound()); + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetBuffer().GetOffsetAddress(label->GetLocation()); + } + + Instruction* GetInstructionAt(ptrdiff_t instruction_offset) { + return GetBuffer()->GetOffsetAddress(instruction_offset); + } + VIXL_DEPRECATED("GetInstructionAt", + Instruction* InstructionAt(ptrdiff_t instruction_offset)) { + return GetInstructionAt(instruction_offset); + } + + ptrdiff_t GetInstructionOffset(Instruction* instruction) { + VIXL_STATIC_ASSERT(sizeof(*instruction) == 1); + ptrdiff_t offset = + instruction - GetBuffer()->GetStartAddress(); + VIXL_ASSERT((0 <= offset) && + (offset < static_cast(GetBuffer()->GetCapacity()))); + return offset; + } + VIXL_DEPRECATED("GetInstructionOffset", + ptrdiff_t InstructionOffset(Instruction* instruction)) { + return GetInstructionOffset(instruction); + } + + // Instruction set functions. + + // Branch / Jump instructions. + // Branch to register. + void br(const Register& xn); + + // Branch with link to register. 
+ void blr(const Register& xn); + + // Branch to register with return hint. + void ret(const Register& xn = lr); + + // Branch to register, with pointer authentication. Using key A and a modifier + // of zero [Armv8.3]. + void braaz(const Register& xn); + + // Branch to register, with pointer authentication. Using key B and a modifier + // of zero [Armv8.3]. + void brabz(const Register& xn); + + // Branch with link to register, with pointer authentication. Using key A and + // a modifier of zero [Armv8.3]. + void blraaz(const Register& xn); + + // Branch with link to register, with pointer authentication. Using key B and + // a modifier of zero [Armv8.3]. + void blrabz(const Register& xn); + + // Return from subroutine, with pointer authentication. Using key A [Armv8.3]. + void retaa(); + + // Return from subroutine, with pointer authentication. Using key B [Armv8.3]. + void retab(); + + // Branch to register, with pointer authentication. Using key A [Armv8.3]. + void braa(const Register& xn, const Register& xm); + + // Branch to register, with pointer authentication. Using key B [Armv8.3]. + void brab(const Register& xn, const Register& xm); + + // Branch with link to register, with pointer authentication. Using key A + // [Armv8.3]. + void blraa(const Register& xn, const Register& xm); + + // Branch with link to register, with pointer authentication. Using key B + // [Armv8.3]. + void blrab(const Register& xn, const Register& xm); + + // Unconditional branch to label. + void b(Label* label); + + // Conditional branch to label. + void b(Label* label, Condition cond); + + // Unconditional branch to PC offset. + void b(int64_t imm26); + + // Conditional branch to PC offset. + void b(int64_t imm19, Condition cond); + + // Branch with link to label. + void bl(Label* label); + + // Branch with link to PC offset. + void bl(int64_t imm26); + + // Compare and branch to label if zero. 
+ void cbz(const Register& rt, Label* label); + + // Compare and branch to PC offset if zero. + void cbz(const Register& rt, int64_t imm19); + + // Compare and branch to label if not zero. + void cbnz(const Register& rt, Label* label); + + // Compare and branch to PC offset if not zero. + void cbnz(const Register& rt, int64_t imm19); + + // Table lookup from one register. + void tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Table lookup from two registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm); + + // Table lookup from three registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm); + + // Table lookup from four registers. + void tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm); + + // Table lookup extension from one register. + void tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Table lookup extension from two registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm); + + // Table lookup extension from three registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm); + + // Table lookup extension from four registers. + void tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm); + + // Test bit and branch to label if zero. + void tbz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if zero. + void tbz(const Register& rt, unsigned bit_pos, int64_t imm14); + + // Test bit and branch to label if not zero. 
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label); + + // Test bit and branch to PC offset if not zero. + void tbnz(const Register& rt, unsigned bit_pos, int64_t imm14); + + // Address calculation instructions. + // Calculate a PC-relative address. Unlike for branches the offset in adr is + // unscaled (i.e. the result can be unaligned). + + // Calculate the address of a label. + void adr(const Register& xd, Label* label); + + // Calculate the address of a PC offset. + void adr(const Register& xd, int64_t imm21); + + // Calculate the page address of a label. + void adrp(const Register& xd, Label* label); + + // Calculate the page address of a PC offset. + void adrp(const Register& xd, int64_t imm21); + + // Data Processing instructions. + // Add. + void add(const Register& rd, const Register& rn, const Operand& operand); + + // Add and update status flags. + void adds(const Register& rd, const Register& rn, const Operand& operand); + + // Compare negative. + void cmn(const Register& rn, const Operand& operand); + + // Subtract. + void sub(const Register& rd, const Register& rn, const Operand& operand); + + // Subtract and update status flags. + void subs(const Register& rd, const Register& rn, const Operand& operand); + + // Compare. + void cmp(const Register& rn, const Operand& operand); + + // Negate. + void neg(const Register& rd, const Operand& operand); + + // Negate and update status flags. + void negs(const Register& rd, const Operand& operand); + + // Add with carry bit. + void adc(const Register& rd, const Register& rn, const Operand& operand); + + // Add with carry bit and update status flags. + void adcs(const Register& rd, const Register& rn, const Operand& operand); + + // Subtract with carry bit. + void sbc(const Register& rd, const Register& rn, const Operand& operand); + + // Subtract with carry bit and update status flags. 
+ void sbcs(const Register& rd, const Register& rn, const Operand& operand); + + // Rotate register right and insert into NZCV flags under the control of a + // mask [Armv8.4]. + void rmif(const Register& xn, unsigned rotation, StatusFlags flags); + + // Set NZCV flags from register, treated as an 8-bit value [Armv8.4]. + void setf8(const Register& rn); + + // Set NZCV flags from register, treated as an 16-bit value [Armv8.4]. + void setf16(const Register& rn); + + // Negate with carry bit. + void ngc(const Register& rd, const Operand& operand); + + // Negate with carry bit and update status flags. + void ngcs(const Register& rd, const Operand& operand); + + // Logical instructions. + // Bitwise and (A & B). + void and_(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise and (A & B) and update status flags. + void ands(const Register& rd, const Register& rn, const Operand& operand); + + // Bit test and set flags. + void tst(const Register& rn, const Operand& operand); + + // Bit clear (A & ~B). + void bic(const Register& rd, const Register& rn, const Operand& operand); + + // Bit clear (A & ~B) and update status flags. + void bics(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise or (A | B). + void orr(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise nor (A | ~B). + void orn(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise eor/xor (A ^ B). + void eor(const Register& rd, const Register& rn, const Operand& operand); + + // Bitwise enor/xnor (A ^ ~B). + void eon(const Register& rd, const Register& rn, const Operand& operand); + + // Logical shift left by variable. + void lslv(const Register& rd, const Register& rn, const Register& rm); + + // Logical shift right by variable. + void lsrv(const Register& rd, const Register& rn, const Register& rm); + + // Arithmetic shift right by variable. 
+  void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Rotate right by variable.
+  void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Bitfield instructions.
+  // Bitfield move.
+  void bfm(const Register& rd,
+           const Register& rn,
+           unsigned immr,
+           unsigned imms);
+
+  // Signed bitfield move.
+  void sbfm(const Register& rd,
+            const Register& rn,
+            unsigned immr,
+            unsigned imms);
+
+  // Unsigned bitfield move.
+  void ubfm(const Register& rd,
+            const Register& rn,
+            unsigned immr,
+            unsigned imms);
+
+  // Bfm aliases.
+  // Bitfield insert.
+  void bfi(const Register& rd,
+           const Register& rn,
+           unsigned lsb,
+           unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    bfm(rd,
+        rn,
+        (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
+        width - 1);
+  }
+
+  // Bitfield extract and insert low.
+  void bfxil(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    bfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Bitfield clear [Armv8.2]. Implemented as a bitfield insert of the
+  // appropriate zero register.
+  void bfc(const Register& rd, unsigned lsb, unsigned width) {
+    bfi(rd, AppropriateZeroRegFor(rd), lsb, width);
+  }
+
+  // Sbfm aliases.
+  // Arithmetic shift right.
+  void asr(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(shift < static_cast<unsigned>(rd.GetSizeInBits()));
+    sbfm(rd, rn, shift, rd.GetSizeInBits() - 1);
+  }
+
+  // Signed bitfield insert with zero at right.
+  void sbfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    sbfm(rd,
+         rn,
+         (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
+         width - 1);
+  }
+
+  // Signed bitfield extract.
+  void sbfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    sbfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Signed extend byte.
+  void sxtb(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 7); }
+
+  // Signed extend halfword.
+  void sxth(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 15); }
+
+  // Signed extend word.
+  void sxtw(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 31); }
+
+  // Ubfm aliases.
+  // Logical shift left.
+  void lsl(const Register& rd, const Register& rn, unsigned shift) {
+    unsigned reg_size = rd.GetSizeInBits();
+    VIXL_ASSERT(shift < reg_size);
+    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+  }
+
+  // Logical shift right.
+  void lsr(const Register& rd, const Register& rn, unsigned shift) {
+    VIXL_ASSERT(shift < static_cast<unsigned>(rd.GetSizeInBits()));
+    ubfm(rd, rn, shift, rd.GetSizeInBits() - 1);
+  }
+
+  // Unsigned bitfield insert with zero at right.
+  void ubfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    ubfm(rd,
+         rn,
+         (rd.GetSizeInBits() - lsb) & (rd.GetSizeInBits() - 1),
+         width - 1);
+  }
+
+  // Unsigned bitfield extract.
+  void ubfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
+    VIXL_ASSERT(width >= 1);
+    VIXL_ASSERT(lsb + width <= static_cast<unsigned>(rn.GetSizeInBits()));
+    ubfm(rd, rn, lsb, lsb + width - 1);
+  }
+
+  // Unsigned extend byte.
+  void uxtb(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 7); }
+
+  // Unsigned extend halfword.
+  void uxth(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 15); }
+
+  // Unsigned extend word.
+  void uxtw(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 31); }
+
+  // Extract.
+  void extr(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            unsigned lsb);
+
+  // Conditional select: rd = cond ? rn : rm.
+  void csel(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            Condition cond);
+
+  // Conditional select increment: rd = cond ? rn : rm + 1.
+  void csinc(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional select inversion: rd = cond ? rn : ~rm.
+  void csinv(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional select negation: rd = cond ? rn : -rm.
+  void csneg(const Register& rd,
+             const Register& rn,
+             const Register& rm,
+             Condition cond);
+
+  // Conditional set: rd = cond ? 1 : 0.
+  void cset(const Register& rd, Condition cond);
+
+  // Conditional set mask: rd = cond ? -1 : 0.
+  void csetm(const Register& rd, Condition cond);
+
+  // Conditional increment: rd = cond ? rn + 1 : rn.
+  void cinc(const Register& rd, const Register& rn, Condition cond);
+
+  // Conditional invert: rd = cond ? ~rn : rn.
+  void cinv(const Register& rd, const Register& rn, Condition cond);
+
+  // Conditional negate: rd = cond ? -rn : rn.
+  void cneg(const Register& rd, const Register& rn, Condition cond);
+
+  // Rotate right. Implemented as an extract with both source operands set to
+  // rs, so the bits shifted out on the right re-enter on the left.
+  void ror(const Register& rd, const Register& rs, unsigned shift) {
+    extr(rd, rs, rs, shift);
+  }
+
+  // Conditional comparison.
+  // Conditional compare negative.
+  void ccmn(const Register& rn,
+            const Operand& operand,
+            StatusFlags nzcv,
+            Condition cond);
+
+  // Conditional compare.
+  void ccmp(const Register& rn,
+            const Operand& operand,
+            StatusFlags nzcv,
+            Condition cond);
+
+  // CRC-32 checksum from byte.
+  void crc32b(const Register& wd, const Register& wn, const Register& wm);
+
+  // CRC-32 checksum from half-word.
+  void crc32h(const Register& wd, const Register& wn, const Register& wm);
+
+  // CRC-32 checksum from word.
+  void crc32w(const Register& wd, const Register& wn, const Register& wm);
+
+  // CRC-32 checksum from double word.
+  void crc32x(const Register& wd, const Register& wn, const Register& xm);
+
+  // CRC-32 C checksum from byte.
+  void crc32cb(const Register& wd, const Register& wn, const Register& wm);
+
+  // CRC-32 C checksum from half-word.
+  void crc32ch(const Register& wd, const Register& wn, const Register& wm);
+
+  // CRC-32 C checksum from word.
+  void crc32cw(const Register& wd, const Register& wn, const Register& wm);
+
+  // CRC-32C checksum from double word.
+  void crc32cx(const Register& wd, const Register& wn, const Register& xm);
+
+  // Multiply.
+  void mul(const Register& rd, const Register& rn, const Register& rm);
+
+  // Negated multiply.
+  void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+  // Signed long multiply: 32 x 32 -> 64-bit.
+  void smull(const Register& xd, const Register& wn, const Register& wm);
+
+  // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
+  void smulh(const Register& xd, const Register& xn, const Register& xm);
+
+  // Multiply and accumulate.
+  void madd(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            const Register& ra);
+
+  // Multiply and subtract.
+  void msub(const Register& rd,
+            const Register& rn,
+            const Register& rm,
+            const Register& ra);
+
+  // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+  void smaddl(const Register& xd,
+              const Register& wn,
+              const Register& wm,
+              const Register& xa);
+
+  // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+  void umaddl(const Register& xd,
+              const Register& wn,
+              const Register& wm,
+              const Register& xa);
+
+  // Unsigned long multiply: 32 x 32 -> 64-bit. Implemented as umaddl with a
+  // zero (xzr) accumulator.
+  void umull(const Register& xd, const Register& wn, const Register& wm) {
+    umaddl(xd, wn, wm, xzr);
+  }
+
+  // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>.
+  void umulh(const Register& xd, const Register& xn, const Register& xm);
+
+  // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+  void smsubl(const Register& xd,
+              const Register& wn,
+              const Register& wm,
+              const Register& xa);
+
+  // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+  void umsubl(const Register& xd,
+              const Register& wn,
+              const Register& wm,
+              const Register& xa);
+
+  // Signed integer divide.
+  void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Unsigned integer divide.
+  void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+  // Bit reverse.
+  void rbit(const Register& rd, const Register& rn);
+
+  // Reverse bytes in 16-bit half words.
+  void rev16(const Register& rd, const Register& rn);
+
+  // Reverse bytes in 32-bit words.
+  void rev32(const Register& xd, const Register& xn);
+
+  // Reverse bytes in 64-bit general purpose register, an alias for rev
+  // [Armv8.2]. The alias only exists for X registers, hence the assert.
+  void rev64(const Register& xd, const Register& xn) {
+    VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits());
+    rev(xd, xn);
+  }
+
+  // Reverse bytes.
+  void rev(const Register& rd, const Register& rn);
+
+  // Count leading zeroes.
+  void clz(const Register& rd, const Register& rn);
+
+  // Count leading sign bits.
+  void cls(const Register& rd, const Register& rn);
+
+  // Pointer Authentication Code for Instruction address, using key A [Armv8.3].
+  void pacia(const Register& xd, const Register& rn);
+
+  // Pointer Authentication Code for Instruction address, using key A and a
+  // modifier of zero [Armv8.3].
+  void paciza(const Register& xd);
+
+  // Pointer Authentication Code for Instruction address, using key A, with
+  // address in x17 and modifier in x16 [Armv8.3].
+  void pacia1716();
+
+  // Pointer Authentication Code for Instruction address, using key A, with
+  // address in LR and modifier in SP [Armv8.3].
+ void paciasp(); + + // Pointer Authentication Code for Instruction address, using key A, with + // address in LR and a modifier of zero [Armv8.3]. + void paciaz(); + + // Pointer Authentication Code for Instruction address, using key B [Armv8.3]. + void pacib(const Register& xd, const Register& xn); + + // Pointer Authentication Code for Instruction address, using key B and a + // modifier of zero [Armv8.3]. + void pacizb(const Register& xd); + + // Pointer Authentication Code for Instruction address, using key B, with + // address in x17 and modifier in x16 [Armv8.3]. + void pacib1716(); + + // Pointer Authentication Code for Instruction address, using key B, with + // address in LR and modifier in SP [Armv8.3]. + void pacibsp(); + + // Pointer Authentication Code for Instruction address, using key B, with + // address in LR and a modifier of zero [Armv8.3]. + void pacibz(); + + // Pointer Authentication Code for Data address, using key A [Armv8.3]. + void pacda(const Register& xd, const Register& xn); + + // Pointer Authentication Code for Data address, using key A and a modifier of + // zero [Armv8.3]. + void pacdza(const Register& xd); + + // Pointer Authentication Code for Data address, using key A, with address in + // x17 and modifier in x16 [Armv8.3]. + void pacda1716(); + + // Pointer Authentication Code for Data address, using key A, with address in + // LR and modifier in SP [Armv8.3]. + void pacdasp(); + + // Pointer Authentication Code for Data address, using key A, with address in + // LR and a modifier of zero [Armv8.3]. + void pacdaz(); + + // Pointer Authentication Code for Data address, using key B [Armv8.3]. + void pacdb(const Register& xd, const Register& xn); + + // Pointer Authentication Code for Data address, using key B and a modifier of + // zero [Armv8.3]. + void pacdzb(const Register& xd); + + // Pointer Authentication Code for Data address, using key B, with address in + // x17 and modifier in x16 [Armv8.3]. 
+ void pacdb1716(); + + // Pointer Authentication Code for Data address, using key B, with address in + // LR and modifier in SP [Armv8.3]. + void pacdbsp(); + + // Pointer Authentication Code for Data address, using key B, with address in + // LR and a modifier of zero [Armv8.3]. + void pacdbz(); + + // Pointer Authentication Code, using Generic key [Armv8.3]. + void pacga(const Register& xd, const Register& xn, const Register& xm); + + // Authenticate Instruction address, using key A [Armv8.3]. + void autia(const Register& xd, const Register& xn); + + // Authenticate Instruction address, using key A and a modifier of zero + // [Armv8.3]. + void autiza(const Register& xd); + + // Authenticate Instruction address, using key A, with address in x17 and + // modifier in x16 [Armv8.3]. + void autia1716(); + + // Authenticate Instruction address, using key A, with address in LR and + // modifier in SP [Armv8.3]. + void autiasp(); + + // Authenticate Instruction address, using key A, with address in LR and a + // modifier of zero [Armv8.3]. + void autiaz(); + + // Authenticate Instruction address, using key B [Armv8.3]. + void autib(const Register& xd, const Register& xn); + + // Authenticate Instruction address, using key B and a modifier of zero + // [Armv8.3]. + void autizb(const Register& xd); + + // Authenticate Instruction address, using key B, with address in x17 and + // modifier in x16 [Armv8.3]. + void autib1716(); + + // Authenticate Instruction address, using key B, with address in LR and + // modifier in SP [Armv8.3]. + void autibsp(); + + // Authenticate Instruction address, using key B, with address in LR and a + // modifier of zero [Armv8.3]. + void autibz(); + + // Authenticate Data address, using key A [Armv8.3]. + void autda(const Register& xd, const Register& xn); + + // Authenticate Data address, using key A and a modifier of zero [Armv8.3]. 
+ void autdza(const Register& xd); + + // Authenticate Data address, using key A, with address in x17 and modifier in + // x16 [Armv8.3]. + void autda1716(); + + // Authenticate Data address, using key A, with address in LR and modifier in + // SP [Armv8.3]. + void autdasp(); + + // Authenticate Data address, using key A, with address in LR and a modifier + // of zero [Armv8.3]. + void autdaz(); + + // Authenticate Data address, using key B [Armv8.3]. + void autdb(const Register& xd, const Register& xn); + + // Authenticate Data address, using key B and a modifier of zero [Armv8.3]. + void autdzb(const Register& xd); + + // Authenticate Data address, using key B, with address in x17 and modifier in + // x16 [Armv8.3]. + void autdb1716(); + + // Authenticate Data address, using key B, with address in LR and modifier in + // SP [Armv8.3]. + void autdbsp(); + + // Authenticate Data address, using key B, with address in LR and a modifier + // of zero [Armv8.3]. + void autdbz(); + + // Strip Pointer Authentication Code of Data address [Armv8.3]. + void xpacd(const Register& xd); + + // Strip Pointer Authentication Code of Instruction address [Armv8.3]. + void xpaci(const Register& xd); + + // Strip Pointer Authentication Code of Instruction address in LR [Armv8.3]. + void xpaclri(); + + // Memory instructions. + // Load integer or FP register. + void ldr(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store integer or FP register. + void str(const CPURegister& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load word with sign extension. + void ldrsw(const Register& xt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte. + void ldrb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store byte. 
+ void strb(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load byte with sign extension. + void ldrsb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word. + void ldrh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Store half-word. + void strh(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load half-word with sign extension. + void ldrsh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferScaledOffset); + + // Load integer or FP register (with unscaled offset). + void ldur(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store integer or FP register (with unscaled offset). + void stur(const CPURegister& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load word with sign extension. + void ldursw(const Register& xt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte (with unscaled offset). + void ldurb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store byte (with unscaled offset). + void sturb(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load byte with sign extension (and unscaled offset). + void ldursb(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word (with unscaled offset). + void ldurh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Store half-word (with unscaled offset). 
+ void sturh(const Register& rt, + const MemOperand& dst, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load half-word with sign extension (and unscaled offset). + void ldursh(const Register& rt, + const MemOperand& src, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Load double-word with pointer authentication, using data key A and a + // modifier of zero [Armv8.3]. + void ldraa(const Register& xt, const MemOperand& src); + + // Load double-word with pointer authentication, using data key B and a + // modifier of zero [Armv8.3]. + void ldrab(const Register& xt, const MemOperand& src); + + // Load integer or FP register pair. + void ldp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair. + void stp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst); + + // Load word pair with sign extension. + void ldpsw(const Register& xt, const Register& xt2, const MemOperand& src); + + // Load integer or FP register pair, non-temporal. + void ldnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src); + + // Store integer or FP register pair, non-temporal. + void stnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst); + + // Load integer or FP register from literal pool. + void ldr(const CPURegister& rt, RawLiteral* literal); + + // Load word with sign extension from literal pool. + void ldrsw(const Register& xt, RawLiteral* literal); + + // Load integer or FP register from pc + imm19 << 2. + void ldr(const CPURegister& rt, int64_t imm19); + + // Load word with sign extension from pc + imm19 << 2. + void ldrsw(const Register& xt, int64_t imm19); + + // Store exclusive byte. + void stxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive half-word. + void stxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store exclusive register. 
+ void stxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load exclusive byte. + void ldxrb(const Register& rt, const MemOperand& src); + + // Load exclusive half-word. + void ldxrh(const Register& rt, const MemOperand& src); + + // Load exclusive register. + void ldxr(const Register& rt, const MemOperand& src); + + // Store exclusive register pair. + void stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load exclusive register pair. + void ldxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release exclusive byte. + void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive half-word. + void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst); + + // Store-release exclusive register. + void stlxr(const Register& rs, const Register& rt, const MemOperand& dst); + + // Load-acquire exclusive byte. + void ldaxrb(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive half-word. + void ldaxrh(const Register& rt, const MemOperand& src); + + // Load-acquire exclusive register. + void ldaxr(const Register& rt, const MemOperand& src); + + // Store-release exclusive register pair. + void stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst); + + // Load-acquire exclusive register pair. + void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src); + + // Store-release byte. + void stlrb(const Register& rt, const MemOperand& dst); + + // Store-release half-word. + void stlrh(const Register& rt, const MemOperand& dst); + + // Store-release register. + void stlr(const Register& rt, const MemOperand& dst); + + // Load-acquire byte. + void ldarb(const Register& rt, const MemOperand& src); + + // Load-acquire half-word. + void ldarh(const Register& rt, const MemOperand& src); + + // Load-acquire register. 
+ void ldar(const Register& rt, const MemOperand& src); + + // Store LORelease byte [Armv8.1]. + void stllrb(const Register& rt, const MemOperand& dst); + + // Store LORelease half-word [Armv8.1]. + void stllrh(const Register& rt, const MemOperand& dst); + + // Store LORelease register [Armv8.1]. + void stllr(const Register& rt, const MemOperand& dst); + + // Load LORelease byte [Armv8.1]. + void ldlarb(const Register& rt, const MemOperand& src); + + // Load LORelease half-word [Armv8.1]. + void ldlarh(const Register& rt, const MemOperand& src); + + // Load LORelease register [Armv8.1]. + void ldlar(const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void cas(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void casa(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void casl(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap word or doubleword in memory [Armv8.1]. + void casal(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. + void casb(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. + void casab(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. + void caslb(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap byte in memory [Armv8.1]. + void casalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. + void cash(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. 
+ void casah(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. + void caslh(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap halfword in memory [Armv8.1]. + void casalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void casp(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void caspa(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void caspl(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Compare and Swap Pair of words or doublewords in memory [Armv8.1]. + void caspal(const Register& rs, + const Register& rs2, + const Register& rt, + const Register& rt2, + const MemOperand& src); + + // Store-release byte (with unscaled offset) [Armv8.4]. + void stlurb(const Register& rt, const MemOperand& dst); + + // Load-acquire RCpc Register byte (with unscaled offset) [Armv8.4]. + void ldapurb(const Register& rt, const MemOperand& src); + + // Load-acquire RCpc Register signed byte (with unscaled offset) [Armv8.4]. + void ldapursb(const Register& rt, const MemOperand& src); + + // Store-release half-word (with unscaled offset) [Armv8.4]. + void stlurh(const Register& rt, const MemOperand& dst); + + // Load-acquire RCpc Register half-word (with unscaled offset) [Armv8.4]. + void ldapurh(const Register& rt, const MemOperand& src); + + // Load-acquire RCpc Register signed half-word (with unscaled offset) + // [Armv8.4]. 
+ void ldapursh(const Register& rt, const MemOperand& src); + + // Store-release word or double-word (with unscaled offset) [Armv8.4]. + void stlur(const Register& rt, const MemOperand& dst); + + // Load-acquire RCpc Register word or double-word (with unscaled offset) + // [Armv8.4]. + void ldapur(const Register& rt, const MemOperand& src); + + // Load-acquire RCpc Register signed word (with unscaled offset) [Armv8.4]. + void ldapursw(const Register& xt, const MemOperand& src); + + // Atomic add on byte in memory [Armv8.1] + void ldaddb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, with Load-acquire semantics [Armv8.1] + void ldaddab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, with Store-release semantics [Armv8.1] + void ldaddlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, with Load-acquire and Store-release semantics + // [Armv8.1] + void ldaddalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory [Armv8.1] + void ldaddh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory, with Load-acquire semantics [Armv8.1] + void ldaddah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory, with Store-release semantics [Armv8.1] + void ldaddlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on halfword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldaddalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in memory [Armv8.1] + void ldadd(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in memory, with Load-acquire semantics + // [Armv8.1] + void ldadda(const Register& rs, const 
Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in memory, with Store-release semantics + // [Armv8.1] + void ldaddl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on word or doubleword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldaddal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory [Armv8.1] + void ldclrb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Load-acquire semantics [Armv8.1] + void ldclrab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Store-release semantics [Armv8.1] + void ldclrlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldclralb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory [Armv8.1] + void ldclrh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldclrah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldclrlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldclralh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory [Armv8.1] + void ldclr(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldclra(const Register& rs, const Register& rt, 
const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldclrl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldclral(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory [Armv8.1] + void ldeorb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldeorab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Store-release semantics + // [Armv8.1] + void ldeorlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldeoralb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory [Armv8.1] + void ldeorh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldeorah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldeorlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldeoralh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory [Armv8.1] + void ldeor(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldeora(const 
Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldeorl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldeoral(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory [Armv8.1] + void ldsetb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory, with Load-acquire semantics [Armv8.1] + void ldsetab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory, with Store-release semantics [Armv8.1] + void ldsetlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on byte in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldsetalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory [Armv8.1] + void ldseth(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Load-acquire semantics [Armv8.1] + void ldsetah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldsetlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void ldsetalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory [Armv8.1] + void ldset(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, with Load-acquire semantics + // [Armv8.1] + void ldseta(const Register& rs, const 
Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldsetl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsetal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory [Armv8.1] + void ldsmaxb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldsmaxab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory, with Store-release semantics + // [Armv8.1] + void ldsmaxlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsmaxalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory [Armv8.1] + void ldsmaxh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldsmaxah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldsmaxlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsmaxalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory [Armv8.1] + void ldsmax(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, with Load-acquire + // 
semantics [Armv8.1] + void ldsmaxa(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldsmaxl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void ldsmaxal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory [Armv8.1] + void ldsminb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldsminab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory, with Store-release semantics + // [Armv8.1] + void ldsminlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsminalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory [Armv8.1] + void ldsminh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldsminah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldsminlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldsminalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory [Armv8.1] + void ldsmin(const Register& rs, const Register& rt, const MemOperand& src); + + // 
Atomic signed minimum on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldsmina(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldsminl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void ldsminal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory [Armv8.1] + void ldumaxb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void ldumaxab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, with Store-release semantics + // [Armv8.1] + void ldumaxlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldumaxalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory [Armv8.1] + void ldumaxh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void ldumaxah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, with Store-release semantics + // [Armv8.1] + void ldumaxlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void ldumaxalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory 
[Armv8.1] + void ldumax(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldumaxa(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void ldumaxl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void ldumaxal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory [Armv8.1] + void lduminb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, with Load-acquire semantics + // [Armv8.1] + void lduminab(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, with Store-release semantics + // [Armv8.1] + void lduminlb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void lduminalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory [Armv8.1] + void lduminh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Load-acquire semantics + // [Armv8.1] + void lduminah(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Store-release semantics + // [Armv8.1] + void lduminlh(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Load-acquire and + // Store-release semantics [Armv8.1] + void lduminalh(const Register& rs, const 
Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory [Armv8.1] + void ldumin(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire + // semantics [Armv8.1] + void ldumina(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Store-release + // semantics [Armv8.1] + void lduminl(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Load-acquire + // and Store-release semantics [Armv8.1] + void lduminal(const Register& rs, const Register& rt, const MemOperand& src); + + // Atomic add on byte in memory, without return. [Armv8.1] + void staddb(const Register& rs, const MemOperand& src); + + // Atomic add on byte in memory, with Store-release semantics and without + // return. [Armv8.1] + void staddlb(const Register& rs, const MemOperand& src); + + // Atomic add on halfword in memory, without return. [Armv8.1] + void staddh(const Register& rs, const MemOperand& src); + + // Atomic add on halfword in memory, with Store-release semantics and without + // return. [Armv8.1] + void staddlh(const Register& rs, const MemOperand& src); + + // Atomic add on word or doubleword in memory, without return. [Armv8.1] + void stadd(const Register& rs, const MemOperand& src); + + // Atomic add on word or doubleword in memory, with Store-release semantics + // and without return. [Armv8.1] + void staddl(const Register& rs, const MemOperand& src); + + // Atomic bit clear on byte in memory, without return. [Armv8.1] + void stclrb(const Register& rs, const MemOperand& src); + + // Atomic bit clear on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void stclrlb(const Register& rs, const MemOperand& src); + + // Atomic bit clear on halfword in memory, without return. 
[Armv8.1] + void stclrh(const Register& rs, const MemOperand& src); + + // Atomic bit clear on halfword in memory, with Store-release semantics and + // without return. [Armv8.1] + void stclrlh(const Register& rs, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, without return. [Armv8.1] + void stclr(const Register& rs, const MemOperand& src); + + // Atomic bit clear on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stclrl(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, without return. [Armv8.1] + void steorb(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void steorlb(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, without return. [Armv8.1] + void steorh(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on halfword in memory, with Store-release semantics + // and without return. [Armv8.1] + void steorlh(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, without return. + // [Armv8.1] + void steor(const Register& rs, const MemOperand& src); + + // Atomic exclusive OR on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void steorl(const Register& rs, const MemOperand& src); + + // Atomic bit set on byte in memory, without return. [Armv8.1] + void stsetb(const Register& rs, const MemOperand& src); + + // Atomic bit set on byte in memory, with Store-release semantics and without + // return. [Armv8.1] + void stsetlb(const Register& rs, const MemOperand& src); + + // Atomic bit set on halfword in memory, without return. 
[Armv8.1] + void stseth(const Register& rs, const MemOperand& src); + + // Atomic bit set on halfword in memory, with Store-release semantics and + // without return. [Armv8.1] + void stsetlh(const Register& rs, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, without return. [Armv8.1] + void stset(const Register& rs, const MemOperand& src); + + // Atomic bit set on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stsetl(const Register& rs, const MemOperand& src); + + // Atomic signed maximum on byte in memory, without return. [Armv8.1] + void stsmaxb(const Register& rs, const MemOperand& src); + + // Atomic signed maximum on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void stsmaxlb(const Register& rs, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, without return. [Armv8.1] + void stsmaxh(const Register& rs, const MemOperand& src); + + // Atomic signed maximum on halfword in memory, with Store-release semantics + // and without return. [Armv8.1] + void stsmaxlh(const Register& rs, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, without return. + // [Armv8.1] + void stsmax(const Register& rs, const MemOperand& src); + + // Atomic signed maximum on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stsmaxl(const Register& rs, const MemOperand& src); + + // Atomic signed minimum on byte in memory, without return. [Armv8.1] + void stsminb(const Register& rs, const MemOperand& src); + + // Atomic signed minimum on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void stsminlb(const Register& rs, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, without return. 
[Armv8.1] + void stsminh(const Register& rs, const MemOperand& src); + + // Atomic signed minimum on halfword in memory, with Store-release semantics + // and without return. [Armv8.1] + void stsminlh(const Register& rs, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory, without return. + // [Armv8.1] + void stsmin(const Register& rs, const MemOperand& src); + + // Atomic signed minimum on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stsminl(const Register& rs, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, without return. [Armv8.1] + void stumaxb(const Register& rs, const MemOperand& src); + + // Atomic unsigned maximum on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void stumaxlb(const Register& rs, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, without return. [Armv8.1] + void stumaxh(const Register& rs, const MemOperand& src); + + // Atomic unsigned maximum on halfword in memory, with Store-release semantics + // and without return. [Armv8.1] + void stumaxlh(const Register& rs, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, without return. + // [Armv8.1] + void stumax(const Register& rs, const MemOperand& src); + + // Atomic unsigned maximum on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stumaxl(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, without return. [Armv8.1] + void stuminb(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on byte in memory, with Store-release semantics and + // without return. [Armv8.1] + void stuminlb(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, without return. 
[Armv8.1] + void stuminh(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on halfword in memory, with Store-release semantics + // and without return. [Armv8.1] + void stuminlh(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, without return. + // [Armv8.1] + void stumin(const Register& rs, const MemOperand& src); + + // Atomic unsigned minimum on word or doubleword in memory, with Store-release + // semantics and without return. [Armv8.1] + void stuminl(const Register& rs, const MemOperand& src); + + // Swap byte in memory [Armv8.1] + void swpb(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap byte in memory, with Load-acquire semantics [Armv8.1] + void swpab(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap byte in memory, with Store-release semantics [Armv8.1] + void swplb(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap byte in memory, with Load-acquire and Store-release semantics + // [Armv8.1] + void swpalb(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory [Armv8.1] + void swph(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory, with Load-acquire semantics [Armv8.1] + void swpah(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory, with Store-release semantics [Armv8.1] + void swplh(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap halfword in memory, with Load-acquire and Store-release semantics + // [Armv8.1] + void swpalh(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap word or doubleword in memory [Armv8.1] + void swp(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap word or doubleword in memory, with Load-acquire semantics [Armv8.1] + void swpa(const Register& rs, const Register& rt, 
const MemOperand& src); + + // Swap word or doubleword in memory, with Store-release semantics [Armv8.1] + void swpl(const Register& rs, const Register& rt, const MemOperand& src); + + // Swap word or doubleword in memory, with Load-acquire and Store-release + // semantics [Armv8.1] + void swpal(const Register& rs, const Register& rt, const MemOperand& src); + + // Load-Acquire RCpc Register byte [Armv8.3] + void ldaprb(const Register& rt, const MemOperand& src); + + // Load-Acquire RCpc Register halfword [Armv8.3] + void ldaprh(const Register& rt, const MemOperand& src); + + // Load-Acquire RCpc Register word or doubleword [Armv8.3] + void ldapr(const Register& rt, const MemOperand& src); + + // Prefetch memory. + void prfm(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // Prefetch memory (with unscaled offset). + void prfum(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferUnscaledOffset); + + // Prefetch memory in the literal pool. + void prfm(PrefetchOperation op, RawLiteral* literal); + + // Prefetch from pc + imm19 << 2. + void prfm(PrefetchOperation op, int64_t imm19); + + // Move instructions. The default shift of -1 indicates that the move + // instruction will calculate an appropriate 16-bit immediate and left shift + // that is equal to the 64-bit immediate argument. If an explicit left shift + // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value. + // + // For movk, an explicit shift can be used to indicate which half word should + // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant + // half word with zero, whereas movk(x0, 0, 48) will overwrite the + // most-significant. + + // Move immediate and keep. + void movk(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVK); + } + + // Move inverted immediate. 
+ void movn(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVN); + } + + // Move immediate. + void movz(const Register& rd, uint64_t imm, int shift = -1) { + MoveWide(rd, imm, shift, MOVZ); + } + + // Misc instructions. + // Monitor debug-mode breakpoint. + void brk(int code); + + // Halting debug-mode breakpoint. + void hlt(int code); + + // Generate exception targeting EL1. + void svc(int code); + + // Move register to register. + void mov(const Register& rd, const Register& rn); + + // Move inverted operand to register. + void mvn(const Register& rd, const Operand& operand); + + // System instructions. + // Move to register from system register. + void mrs(const Register& xt, SystemRegister sysreg); + + // Move from register to system register. + void msr(SystemRegister sysreg, const Register& xt); + + // Invert carry flag [Armv8.4]. + void cfinv(); + + // Convert floating-point condition flags from alternative format to Arm + // format [Armv8.5]. + void xaflag(); + + // Convert floating-point condition flags from Arm format to alternative + // format [Armv8.5]. + void axflag(); + + // System instruction. + void sys(int op1, int crn, int crm, int op2, const Register& xt = xzr); + + // System instruction with pre-encoded op (op1:crn:crm:op2). + void sys(int op, const Register& xt = xzr); + + // System data cache operation. + void dc(DataCacheOp op, const Register& rt); + + // System instruction cache operation. + void ic(InstructionCacheOp op, const Register& rt); + + // System hint (named type). + void hint(SystemHint code); + + // System hint (numbered type). + void hint(int imm7); + + // Clear exclusive monitor. + void clrex(int imm4 = 0xf); + + // Data memory barrier. + void dmb(BarrierDomain domain, BarrierType type); + + // Data synchronization barrier. + void dsb(BarrierDomain domain, BarrierType type); + + // Instruction synchronization barrier. + void isb(); + + // Error synchronization barrier. 
+ void esb(); + + // Conditional speculation dependency barrier. + void csdb(); + + // No-op. + void nop() { hint(NOP); } + + // Branch target identification. + void bti(BranchTargetIdentifier id); + + // FP and NEON instructions. + // Move double precision immediate to FP register. + void fmov(const VRegister& vd, double imm); + + // Move single precision immediate to FP register. + void fmov(const VRegister& vd, float imm); + + // Move half precision immediate to FP register [Armv8.2]. + void fmov(const VRegister& vd, Float16 imm); + + // Move FP register to register. + void fmov(const Register& rd, const VRegister& fn); + + // Move register to FP register. + void fmov(const VRegister& vd, const Register& rn); + + // Move FP register to FP register. + void fmov(const VRegister& vd, const VRegister& fn); + + // Move 64-bit register to top half of 128-bit FP register. + void fmov(const VRegister& vd, int index, const Register& rn); + + // Move top half of 128-bit FP register to 64-bit register. + void fmov(const Register& rd, const VRegister& vn, int index); + + // FP add. + void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP subtract. + void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP multiply. + void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add. + void fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract. + void fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-add and negate. + void fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP fused multiply-subtract and negate. + void fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va); + + // FP multiply-negate scalar. 
+ void fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP reciprocal exponent scalar. + void frecpx(const VRegister& vd, const VRegister& vn); + + // FP divide. + void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP maximum. + void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP minimum. + void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP maximum number. + void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP minimum number. + void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm); + + // FP absolute. + void fabs(const VRegister& vd, const VRegister& vn); + + // FP negate. + void fneg(const VRegister& vd, const VRegister& vn); + + // FP square root. + void fsqrt(const VRegister& vd, const VRegister& vn); + + // FP round to integer, nearest with ties to away. + void frinta(const VRegister& vd, const VRegister& vn); + + // FP round to integer, implicit rounding. + void frinti(const VRegister& vd, const VRegister& vn); + + // FP round to integer, toward minus infinity. + void frintm(const VRegister& vd, const VRegister& vn); + + // FP round to integer, nearest with ties to even. + void frintn(const VRegister& vd, const VRegister& vn); + + // FP round to integer, toward plus infinity. + void frintp(const VRegister& vd, const VRegister& vn); + + // FP round to integer, exact, implicit rounding. + void frintx(const VRegister& vd, const VRegister& vn); + + // FP round to integer, towards zero. + void frintz(const VRegister& vd, const VRegister& vn); + + void FPCompareMacro(const VRegister& vn, double value, FPTrapFlags trap); + + void FPCompareMacro(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap); + + // FP compare registers. + void fcmp(const VRegister& vn, const VRegister& vm); + + // FP compare immediate. 
+ void fcmp(const VRegister& vn, double value); + + void FPCCompareMacro(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap); + + // FP conditional compare. + void fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond); + + // FP signaling compare registers. + void fcmpe(const VRegister& vn, const VRegister& vm); + + // FP signaling compare immediate. + void fcmpe(const VRegister& vn, double value); + + // FP conditional signaling compare. + void fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond); + + // FP conditional select. + void fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond); + + // Common FP Convert functions. + void NEONFPConvertToInt(const Register& rd, const VRegister& vn, Instr op); + void NEONFPConvertToInt(const VRegister& vd, const VRegister& vn, Instr op); + void NEONFP16ConvertToInt(const VRegister& vd, const VRegister& vn, Instr op); + + // FP convert between precisions. + void fcvt(const VRegister& vd, const VRegister& vn); + + // FP convert to higher precision. + void fcvtl(const VRegister& vd, const VRegister& vn); + + // FP convert to higher precision (second part). + void fcvtl2(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision. + void fcvtn(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision (second part). + void fcvtn2(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision, rounding to odd. + void fcvtxn(const VRegister& vd, const VRegister& vn); + + // FP convert to lower precision, rounding to odd (second part). + void fcvtxn2(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to away. + void fcvtas(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to away. 
+ void fcvtau(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to away. + void fcvtas(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to away. + void fcvtau(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, round towards -infinity. + void fcvtms(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, round towards -infinity. + void fcvtmu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, round towards -infinity. + void fcvtms(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, round towards -infinity. + void fcvtmu(const VRegister& vd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to even. + void fcvtns(const Register& rd, const VRegister& vn); + + // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3]. + void fjcvtzs(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to even. + void fcvtnu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, nearest with ties to even. + void fcvtns(const VRegister& rd, const VRegister& vn); + + // FP convert to unsigned integer, nearest with ties to even. + void fcvtnu(const VRegister& rd, const VRegister& vn); + + // FP convert to signed integer or fixed-point, round towards zero. + void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0); + + // FP convert to unsigned integer or fixed-point, round towards zero. + void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0); + + // FP convert to signed integer or fixed-point, round towards zero. + void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0); + + // FP convert to unsigned integer or fixed-point, round towards zero. 
+ void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0); + + // FP convert to signed integer, round towards +infinity. + void fcvtps(const Register& rd, const VRegister& vn); + + // FP convert to unsigned integer, round towards +infinity. + void fcvtpu(const Register& rd, const VRegister& vn); + + // FP convert to signed integer, round towards +infinity. + void fcvtps(const VRegister& vd, const VRegister& vn); + + // FP convert to unsigned integer, round towards +infinity. + void fcvtpu(const VRegister& vd, const VRegister& vn); + + // Convert signed integer or fixed point to FP. + void scvtf(const VRegister& fd, const Register& rn, int fbits = 0); + + // Convert unsigned integer or fixed point to FP. + void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0); + + // Convert signed integer or fixed-point to FP. + void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Convert unsigned integer or fixed-point to FP. + void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0); + + // Unsigned absolute difference. + void uabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference. + void sabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate. + void uaba(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate. + void saba(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add. + void add(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Subtract. + void sub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned halving add. + void uhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed halving add. + void shadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned rounding halving add. 
+ void urhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed rounding halving add. + void srhadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned halving sub. + void uhsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed halving sub. + void shsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating add. + void uqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating add. + void sqadd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating subtract. + void uqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating subtract. + void sqsub(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add pairwise. + void addp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add pair of elements scalar. + void addp(const VRegister& vd, const VRegister& vn); + + // Multiply-add to accumulator. + void mla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply-subtract to accumulator. + void mls(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply. + void mul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Multiply by scalar element. + void mul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Multiply-add by scalar element. + void mla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Multiply-subtract by scalar element. + void mls(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element. + void smlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-add by scalar element (second part). 
+ void smlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-add by scalar element. + void umlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-add by scalar element (second part). + void umlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-sub by scalar element. + void smlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply-sub by scalar element (second part). + void smlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-sub by scalar element. + void umlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply-sub by scalar element (second part). + void umlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply by scalar element. + void smull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed long multiply by scalar element (second part). + void smull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply by scalar element. + void umull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply by scalar element (second part). + void umull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating double long multiply by element. + void sqdmull(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating double long multiply by element (second part). 
+ void sqdmull2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-add by element. + void sqdmlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-add by element (second part). + void sqdmlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-sub by element. + void sqdmlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating doubling long multiply-sub by element (second part). + void sqdmlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Compare equal. + void cmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare signed greater than or equal. + void cmge(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare signed greater than. + void cmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare unsigned higher. + void cmhi(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare unsigned higher or same. + void cmhs(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare bitwise test bits nonzero. + void cmtst(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Compare bitwise to zero. + void cmeq(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed greater than or equal to zero. + void cmge(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed greater than zero. + void cmgt(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed less than or equal to zero. + void cmle(const VRegister& vd, const VRegister& vn, int value); + + // Compare signed less than zero. 
+ void cmlt(const VRegister& vd, const VRegister& vn, int value); + + // Signed shift left by register. + void sshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned shift left by register. + void ushl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating shift left by register. + void sqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating shift left by register. + void uqshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed rounding shift left by register. + void srshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned rounding shift left by register. + void urshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding shift left by register. + void sqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned saturating rounding shift left by register. + void uqrshl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise and. + void and_(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise or. + void orr(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise or immediate. + void orr(const VRegister& vd, const int imm8, const int left_shift = 0); + + // Move register to register. + void mov(const VRegister& vd, const VRegister& vn); + + // Bitwise orn. + void orn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise eor. + void eor(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bit clear immediate. + void bic(const VRegister& vd, const int imm8, const int left_shift = 0); + + // Bit clear. + void bic(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise insert if false. + void bif(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise insert if true. 
+ void bit(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Bitwise select. + void bsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply. + void pmul(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Vector move immediate. + void movi(const VRegister& vd, + const uint64_t imm, + Shift shift = LSL, + const int shift_amount = 0); + + // Bitwise not. + void mvn(const VRegister& vd, const VRegister& vn); + + // Vector move inverted immediate. + void mvni(const VRegister& vd, + const int imm8, + Shift shift = LSL, + const int shift_amount = 0); + + // Signed saturating accumulate of unsigned value. + void suqadd(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating accumulate of signed value. + void usqadd(const VRegister& vd, const VRegister& vn); + + // Absolute value. + void abs(const VRegister& vd, const VRegister& vn); + + // Signed saturating absolute value. + void sqabs(const VRegister& vd, const VRegister& vn); + + // Negate. + void neg(const VRegister& vd, const VRegister& vn); + + // Signed saturating negate. + void sqneg(const VRegister& vd, const VRegister& vn); + + // Bitwise not. + void not_(const VRegister& vd, const VRegister& vn); + + // Extract narrow. + void xtn(const VRegister& vd, const VRegister& vn); + + // Extract narrow (second part). + void xtn2(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract narrow. + void sqxtn(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract narrow (second part). + void sqxtn2(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating extract narrow. + void uqxtn(const VRegister& vd, const VRegister& vn); + + // Unsigned saturating extract narrow (second part). + void uqxtn2(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract unsigned narrow. 
+ void sqxtun(const VRegister& vd, const VRegister& vn); + + // Signed saturating extract unsigned narrow (second part). + void sqxtun2(const VRegister& vd, const VRegister& vn); + + // Extract vector from pair of vectors. + void ext(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int index); + + // Duplicate vector element to vector or scalar. + void dup(const VRegister& vd, const VRegister& vn, int vn_index); + + // Move vector element to scalar. + void mov(const VRegister& vd, const VRegister& vn, int vn_index); + + // Duplicate general-purpose register to vector. + void dup(const VRegister& vd, const Register& rn); + + // Insert vector element from another vector element. + void ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index); + + // Move vector element to another vector element. + void mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index); + + // Insert vector element from general-purpose register. + void ins(const VRegister& vd, int vd_index, const Register& rn); + + // Move general-purpose register to a vector element. + void mov(const VRegister& vd, int vd_index, const Register& rn); + + // Unsigned move vector element to general-purpose register. + void umov(const Register& rd, const VRegister& vn, int vn_index); + + // Move vector element to general-purpose register. + void mov(const Register& rd, const VRegister& vn, int vn_index); + + // Signed move vector element to general-purpose register. + void smov(const Register& rd, const VRegister& vn, int vn_index); + + // One-element structure load to one register. + void ld1(const VRegister& vt, const MemOperand& src); + + // One-element structure load to two registers. + void ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // One-element structure load to three registers. 
+ void ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // One-element structure load to four registers. + void ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // One-element single structure load to one lane. + void ld1(const VRegister& vt, int lane, const MemOperand& src); + + // One-element single structure load to all lanes. + void ld1r(const VRegister& vt, const MemOperand& src); + + // Two-element structure load. + void ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Two-element single structure load to one lane. + void ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src); + + // Two-element single structure load to all lanes. + void ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Three-element structure load. + void ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure load to one lane. + void ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src); + + // Three-element single structure load to all lanes. + void ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Four-element structure load. + void ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Four-element single structure load to one lane. + void ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src); + + // Four-element single structure load to all lanes. + void ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Count leading sign bits. 
+ void cls(const VRegister& vd, const VRegister& vn); + + // Count leading zero bits (vector). + void clz(const VRegister& vd, const VRegister& vn); + + // Population count per byte. + void cnt(const VRegister& vd, const VRegister& vn); + + // Reverse bit order. + void rbit(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 16-bit halfwords. + void rev16(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 32-bit words. + void rev32(const VRegister& vd, const VRegister& vn); + + // Reverse elements in 64-bit doublewords. + void rev64(const VRegister& vd, const VRegister& vn); + + // Unsigned reciprocal square root estimate. + void ursqrte(const VRegister& vd, const VRegister& vn); + + // Unsigned reciprocal estimate. + void urecpe(const VRegister& vd, const VRegister& vn); + + // Signed pairwise long add. + void saddlp(const VRegister& vd, const VRegister& vn); + + // Unsigned pairwise long add. + void uaddlp(const VRegister& vd, const VRegister& vn); + + // Signed pairwise long add and accumulate. + void sadalp(const VRegister& vd, const VRegister& vn); + + // Unsigned pairwise long add and accumulate. + void uadalp(const VRegister& vd, const VRegister& vn); + + // Shift left by immediate. + void shl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift left by immediate. + void sqshl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift left unsigned by immediate. + void sqshlu(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift left by immediate. + void uqshl(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift left long by immediate. + void sshll(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift left long by immediate (second part). + void sshll2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed extend long. 
+ void sxtl(const VRegister& vd, const VRegister& vn); + + // Signed extend long (second part). + void sxtl2(const VRegister& vd, const VRegister& vn); + + // Unsigned shift left long by immediate. + void ushll(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift left long by immediate (second part). + void ushll2(const VRegister& vd, const VRegister& vn, int shift); + + // Shift left long by element size. + void shll(const VRegister& vd, const VRegister& vn, int shift); + + // Shift left long by element size (second part). + void shll2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned extend long. + void uxtl(const VRegister& vd, const VRegister& vn); + + // Unsigned extend long (second part). + void uxtl2(const VRegister& vd, const VRegister& vn); + + // Shift left by immediate and insert. + void sli(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right by immediate and insert. + void sri(const VRegister& vd, const VRegister& vn, int shift); + + // Signed maximum. + void smax(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed pairwise maximum. + void smaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add across vector. + void addv(const VRegister& vd, const VRegister& vn); + + // Signed add long across vector. + void saddlv(const VRegister& vd, const VRegister& vn); + + // Unsigned add long across vector. + void uaddlv(const VRegister& vd, const VRegister& vn); + + // FP maximum number across vector. + void fmaxnmv(const VRegister& vd, const VRegister& vn); + + // FP maximum across vector. + void fmaxv(const VRegister& vd, const VRegister& vn); + + // FP minimum number across vector. + void fminnmv(const VRegister& vd, const VRegister& vn); + + // FP minimum across vector. + void fminv(const VRegister& vd, const VRegister& vn); + + // Signed maximum across vector. + void smaxv(const VRegister& vd, const VRegister& vn); + + // Signed minimum. 
+ void smin(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed minimum pairwise. + void sminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed minimum across vector. + void sminv(const VRegister& vd, const VRegister& vn); + + // One-element structure store from one register. + void st1(const VRegister& vt, const MemOperand& src); + + // One-element structure store from two registers. + void st1(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // One-element structure store from three registers. + void st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // One-element structure store from four registers. + void st1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // One-element single structure store from one lane. + void st1(const VRegister& vt, int lane, const MemOperand& src); + + // Two-element structure store from two registers. + void st2(const VRegister& vt, const VRegister& vt2, const MemOperand& src); + + // Two-element single structure store from two lanes. + void st2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src); + + // Three-element structure store from three registers. + void st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src); + + // Three-element single structure store from three lanes. + void st3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src); + + // Four-element structure store from four registers. + void st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src); + + // Four-element single structure store from four lanes. 
+ void st4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src); + + // Unsigned add long. + void uaddl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add long (second part). + void uaddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add wide. + void uaddw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned add wide (second part). + void uaddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add long. + void saddl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add long (second part). + void saddl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add wide. + void saddw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed add wide (second part). + void saddw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract long. + void usubl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract long (second part). + void usubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract wide. + void usubw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned subtract wide (second part). + void usubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed subtract long. + void ssubl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed subtract long (second part). + void ssubl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed integer subtract wide. + void ssubw(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed integer subtract wide (second part). + void ssubw2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned maximum. 
+ void umax(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned pairwise maximum. + void umaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned maximum across vector. + void umaxv(const VRegister& vd, const VRegister& vn); + + // Unsigned minimum. + void umin(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned pairwise minimum. + void uminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned minimum across vector. + void uminv(const VRegister& vd, const VRegister& vn); + + // Transpose vectors (primary). + void trn1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Transpose vectors (secondary). + void trn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unzip vectors (primary). + void uzp1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unzip vectors (secondary). + void uzp2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Zip vectors (primary). + void zip1(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Zip vectors (secondary). + void zip2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed shift right by immediate. + void sshr(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift right by immediate. + void ushr(const VRegister& vd, const VRegister& vn, int shift); + + // Signed rounding shift right by immediate. + void srshr(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned rounding shift right by immediate. + void urshr(const VRegister& vd, const VRegister& vn, int shift); + + // Signed shift right by immediate and accumulate. + void ssra(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned shift right by immediate and accumulate. 
+ void usra(const VRegister& vd, const VRegister& vn, int shift); + + // Signed rounding shift right by immediate and accumulate. + void srsra(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned rounding shift right by immediate and accumulate. + void ursra(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right narrow by immediate. + void shrn(const VRegister& vd, const VRegister& vn, int shift); + + // Shift right narrow by immediate (second part). + void shrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Rounding shift right narrow by immediate. + void rshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Rounding shift right narrow by immediate (second part). + void rshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift right narrow by immediate. + void uqshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating shift right narrow by immediate (second part). + void uqshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating rounding shift right narrow by immediate. + void uqrshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Unsigned saturating rounding shift right narrow by immediate (second part). + void uqrshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right narrow by immediate. + void sqshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right narrow by immediate (second part). + void sqshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating rounded shift right narrow by immediate. + void sqrshrn(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating rounded shift right narrow by immediate (second part). + void sqrshrn2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right unsigned narrow by immediate. 
+ void sqshrun(const VRegister& vd, const VRegister& vn, int shift); + + // Signed saturating shift right unsigned narrow by immediate (second part). + void sqshrun2(const VRegister& vd, const VRegister& vn, int shift); + + // Signed sat rounded shift right unsigned narrow by immediate. + void sqrshrun(const VRegister& vd, const VRegister& vn, int shift); + + // Signed sat rounded shift right unsigned narrow by immediate (second part). + void sqrshrun2(const VRegister& vd, const VRegister& vn, int shift); + + // FP reciprocal step. + void frecps(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP reciprocal estimate. + void frecpe(const VRegister& vd, const VRegister& vn); + + // FP reciprocal square root estimate. + void frsqrte(const VRegister& vd, const VRegister& vn); + + // FP reciprocal square root step. + void frsqrts(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate long. + void sabal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference and accumulate long (second part). + void sabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate long. + void uabal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference and accumulate long (second part). + void uabal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference long. + void sabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed absolute difference long (second part). + void sabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference long. + void uabdl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned absolute difference long (second part). 
+ void uabdl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply long. + void pmull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Polynomial multiply long (second part). + void pmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-add. + void smlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-add (second part). + void smlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-add. + void umlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-add (second part). + void umlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-sub. + void smlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply-sub (second part). + void smlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-sub. + void umlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply-sub (second part). + void umlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply. + void smull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed long multiply (second part). + void smull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-add. + void sqdmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-add (second part). + void sqdmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-subtract. + void sqdmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply-subtract (second part). 
+ void sqdmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply. + void sqdmull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling long multiply (second part). + void sqdmull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling multiply returning high half. + void sqdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding doubling multiply returning high half. + void sqrdmulh(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed dot product [Armv8.2]. + void sdot(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding doubling multiply accumulate returning high + // half [Armv8.1]. + void sqrdmlah(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned dot product [Armv8.2]. + void udot(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating rounding doubling multiply subtract returning high half + // [Armv8.1]. + void sqrdmlsh(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Signed saturating doubling multiply element returning high half. + void sqdmulh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating rounding doubling multiply element returning high half. + void sqrdmulh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed dot product by element [Armv8.2]. + void sdot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating rounding doubling multiply accumulate element returning + // high half [Armv8.1]. + void sqrdmlah(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned dot product by element [Armv8.2]. 
+ void udot(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Signed saturating rounding doubling multiply subtract element returning + // high half [Armv8.1]. + void sqrdmlsh(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // Unsigned long multiply long. + void umull(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Unsigned long multiply (second part). + void umull2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add narrow returning high half. + void addhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Add narrow returning high half (second part). + void addhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding add narrow returning high half. + void raddhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding add narrow returning high half (second part). + void raddhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Subtract narrow returning high half. + void subhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Subtract narrow returning high half (second part). + void subhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding subtract narrow returning high half. + void rsubhn(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // Rounding subtract narrow returning high half (second part). + void rsubhn2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP vector multiply accumulate. + void fmla(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add long to accumulator. + void fmlal(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add long to accumulator (second part). 
+ void fmlal2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-add long to accumulator by element. + void fmlal(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-add long to accumulator by element (second part). + void fmlal2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP vector multiply subtract. + void fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-subtract long to accumulator. + void fmlsl(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-subtract long to accumulator (second part). + void fmlsl2(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP fused multiply-subtract long to accumulator by element. + void fmlsl(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-subtract long to accumulator by element (second part). + void fmlsl2(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP vector multiply extended. + void fmulx(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP absolute greater than or equal. + void facge(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP absolute greater than. + void facgt(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP multiply by element. + void fmul(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-add to accumulator by element. + void fmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP fused multiply-sub from accumulator by element. + void fmls(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP multiply extended by element. 
+ void fmulx(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index); + + // FP compare equal. + void fcmeq(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP greater than. + void fcmgt(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP greater than or equal. + void fcmge(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP compare equal to zero. + void fcmeq(const VRegister& vd, const VRegister& vn, double imm); + + // FP greater than zero. + void fcmgt(const VRegister& vd, const VRegister& vn, double imm); + + // FP greater than or equal to zero. + void fcmge(const VRegister& vd, const VRegister& vn, double imm); + + // FP less than or equal to zero. + void fcmle(const VRegister& vd, const VRegister& vn, double imm); + + // FP less than to zero. + void fcmlt(const VRegister& vd, const VRegister& vn, double imm); + + // FP absolute difference. + void fabd(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise add vector. + void faddp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise add scalar. + void faddp(const VRegister& vd, const VRegister& vn); + + // FP pairwise maximum vector. + void fmaxp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise maximum scalar. + void fmaxp(const VRegister& vd, const VRegister& vn); + + // FP pairwise minimum vector. + void fminp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise minimum scalar. + void fminp(const VRegister& vd, const VRegister& vn); + + // FP pairwise maximum number vector. + void fmaxnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise maximum number scalar. + void fmaxnmp(const VRegister& vd, const VRegister& vn); + + // FP pairwise minimum number vector. 
+ void fminnmp(const VRegister& vd, const VRegister& vn, const VRegister& vm); + + // FP pairwise minimum number scalar. + void fminnmp(const VRegister& vd, const VRegister& vn); + + // v8.3 complex numbers - note that these are only partial/helper functions + // and must be used in series in order to perform full CN operations. + // FP complex multiply accumulate (by element) [Armv8.3]. + void fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + int rot); + + // FP complex multiply accumulate [Armv8.3]. + void fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot); + + // FP complex add [Armv8.3]. + void fcadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot); + + // Emit generic instructions. + // Emit raw instructions into the instruction stream. + void dci(Instr raw_inst) { Emit(raw_inst); } + + // Emit 32 bits of data into the instruction stream. + void dc32(uint32_t data) { dc(data); } + + // Emit 64 bits of data into the instruction stream. + void dc64(uint64_t data) { dc(data); } + + // Emit data in the instruction stream. + template + void dc(T data) { + VIXL_ASSERT(AllowAssembler()); + GetBuffer()->Emit(data); + } + + // Copy a string into the instruction stream, including the terminating NULL + // character. The instruction pointer is then aligned correctly for + // subsequent instructions. + void EmitString(const char* string) { + VIXL_ASSERT(string != NULL); + VIXL_ASSERT(AllowAssembler()); + + GetBuffer()->EmitString(string); + GetBuffer()->Align(); + } + + // Code generation helpers. + + // Register encoding. 
+ static Instr Rd(CPURegister rd) { + VIXL_ASSERT(rd.GetCode() != kSPRegInternalCode); + return rd.GetCode() << Rd_offset; + } + + static Instr Rn(CPURegister rn) { + VIXL_ASSERT(rn.GetCode() != kSPRegInternalCode); + return rn.GetCode() << Rn_offset; + } + + static Instr Rm(CPURegister rm) { + VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode); + return rm.GetCode() << Rm_offset; + } + + static Instr RmNot31(CPURegister rm) { + VIXL_ASSERT(rm.GetCode() != kSPRegInternalCode); + VIXL_ASSERT(!rm.IsZero()); + return Rm(rm); + } + + static Instr Ra(CPURegister ra) { + VIXL_ASSERT(ra.GetCode() != kSPRegInternalCode); + return ra.GetCode() << Ra_offset; + } + + static Instr Rt(CPURegister rt) { + VIXL_ASSERT(rt.GetCode() != kSPRegInternalCode); + return rt.GetCode() << Rt_offset; + } + + static Instr Rt2(CPURegister rt2) { + VIXL_ASSERT(rt2.GetCode() != kSPRegInternalCode); + return rt2.GetCode() << Rt2_offset; + } + + static Instr Rs(CPURegister rs) { + VIXL_ASSERT(rs.GetCode() != kSPRegInternalCode); + return rs.GetCode() << Rs_offset; + } + + // These encoding functions allow the stack pointer to be encoded, and + // disallow the zero register. + static Instr RdSP(Register rd) { + VIXL_ASSERT(!rd.IsZero()); + return (rd.GetCode() & kRegCodeMask) << Rd_offset; + } + + static Instr RnSP(Register rn) { + VIXL_ASSERT(!rn.IsZero()); + return (rn.GetCode() & kRegCodeMask) << Rn_offset; + } + + static Instr RmSP(Register rm) { + VIXL_ASSERT(!rm.IsZero()); + return (rm.GetCode() & kRegCodeMask) << Rm_offset; + } + + // Flags encoding. + static Instr Flags(FlagsUpdate S) { + if (S == SetFlags) { + return 1 << FlagsUpdate_offset; + } else if (S == LeaveFlags) { + return 0 << FlagsUpdate_offset; + } + VIXL_UNREACHABLE(); + return 0; + } + + static Instr Cond(Condition cond) { return cond << Condition_offset; } + + // PC-relative address encoding. 
+ static Instr ImmPCRelAddress(int64_t imm21) { + VIXL_ASSERT(IsInt21(imm21)); + Instr imm = static_cast(TruncateToUint21(imm21)); + Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset; + Instr immlo = imm << ImmPCRelLo_offset; + return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask); + } + + // Branch encoding. + static Instr ImmUncondBranch(int64_t imm26) { + VIXL_ASSERT(IsInt26(imm26)); + return TruncateToUint26(imm26) << ImmUncondBranch_offset; + } + + static Instr ImmCondBranch(int64_t imm19) { + VIXL_ASSERT(IsInt19(imm19)); + return TruncateToUint19(imm19) << ImmCondBranch_offset; + } + + static Instr ImmCmpBranch(int64_t imm19) { + VIXL_ASSERT(IsInt19(imm19)); + return TruncateToUint19(imm19) << ImmCmpBranch_offset; + } + + static Instr ImmTestBranch(int64_t imm14) { + VIXL_ASSERT(IsInt14(imm14)); + return TruncateToUint14(imm14) << ImmTestBranch_offset; + } + + static Instr ImmTestBranchBit(unsigned bit_pos) { + VIXL_ASSERT(IsUint6(bit_pos)); + // Subtract five from the shift offset, as we need bit 5 from bit_pos. + unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5); + unsigned b40 = bit_pos << ImmTestBranchBit40_offset; + b5 &= ImmTestBranchBit5_mask; + b40 &= ImmTestBranchBit40_mask; + return b5 | b40; + } + + // Data Processing encoding. + static Instr SF(Register rd) { + return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits; + } + + static Instr ImmAddSub(int imm) { + VIXL_ASSERT(IsImmAddSub(imm)); + if (IsUint12(imm)) { // No shift required. 
+ imm <<= ImmAddSub_offset; + } else { + imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset); + } + return imm; + } + + static Instr ImmS(unsigned imms, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(imms)) || + ((reg_size == kWRegSize) && IsUint5(imms))); + USE(reg_size); + return imms << ImmS_offset; + } + + static Instr ImmR(unsigned immr, unsigned reg_size) { + VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) || + ((reg_size == kWRegSize) && IsUint5(immr))); + USE(reg_size); + VIXL_ASSERT(IsUint6(immr)); + return immr << ImmR_offset; + } + + static Instr ImmSetBits(unsigned imms, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(IsUint6(imms)); + VIXL_ASSERT((reg_size == kXRegSize) || IsUint6(imms + 3)); + USE(reg_size); + return imms << ImmSetBits_offset; + } + + static Instr ImmRotate(unsigned immr, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) || + ((reg_size == kWRegSize) && IsUint5(immr))); + USE(reg_size); + return immr << ImmRotate_offset; + } + + static Instr ImmLLiteral(int64_t imm19) { + VIXL_ASSERT(IsInt19(imm19)); + return TruncateToUint19(imm19) << ImmLLiteral_offset; + } + + static Instr BitN(unsigned bitn, unsigned reg_size) { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0)); + USE(reg_size); + return bitn << BitN_offset; + } + + static Instr ShiftDP(Shift shift) { + VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR); + return shift << ShiftDP_offset; + } + + static Instr ImmDPShift(unsigned amount) { + VIXL_ASSERT(IsUint6(amount)); + return amount << ImmDPShift_offset; + } + + static Instr ExtendMode(Extend extend) { return extend << ExtendMode_offset; } + + static Instr ImmExtendShift(unsigned left_shift) { + VIXL_ASSERT(left_shift <= 4); + return left_shift 
<< ImmExtendShift_offset; + } + + static Instr ImmCondCmp(unsigned imm) { + VIXL_ASSERT(IsUint5(imm)); + return imm << ImmCondCmp_offset; + } + + static Instr Nzcv(StatusFlags nzcv) { + return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset; + } + + // MemOperand offset encoding. + static Instr ImmLSUnsigned(int64_t imm12) { + VIXL_ASSERT(IsUint12(imm12)); + return TruncateToUint12(imm12) << ImmLSUnsigned_offset; + } + + static Instr ImmLS(int64_t imm9) { + VIXL_ASSERT(IsInt9(imm9)); + return TruncateToUint9(imm9) << ImmLS_offset; + } + + static Instr ImmLSPair(int64_t imm7, unsigned access_size) { + VIXL_ASSERT(IsMultiple(imm7, 1 << access_size)); + int64_t scaled_imm7 = imm7 / (1 << access_size); + VIXL_ASSERT(IsInt7(scaled_imm7)); + return TruncateToUint7(scaled_imm7) << ImmLSPair_offset; + } + + static Instr ImmShiftLS(unsigned shift_amount) { + VIXL_ASSERT(IsUint1(shift_amount)); + return shift_amount << ImmShiftLS_offset; + } + + static Instr ImmLSPAC(int64_t imm10) { + VIXL_ASSERT(IsMultiple(imm10, 1 << 3)); + int64_t scaled_imm10 = imm10 / (1 << 3); + VIXL_ASSERT(IsInt10(scaled_imm10)); + uint32_t s_bit = (scaled_imm10 >> 9) & 1; + return (s_bit << ImmLSPACHi_offset) | + (TruncateToUint9(scaled_imm10) << ImmLSPACLo_offset); + } + + static Instr ImmPrefetchOperation(int imm5) { + VIXL_ASSERT(IsUint5(imm5)); + return imm5 << ImmPrefetchOperation_offset; + } + + static Instr ImmException(int imm16) { + VIXL_ASSERT(IsUint16(imm16)); + return imm16 << ImmException_offset; + } + + static Instr ImmSystemRegister(int imm16) { + VIXL_ASSERT(IsUint16(imm16)); + return imm16 << ImmSystemRegister_offset; + } + + static Instr ImmRMIFRotation(int imm6) { + VIXL_ASSERT(IsUint6(imm6)); + return imm6 << ImmRMIFRotation_offset; + } + + static Instr ImmHint(int imm7) { + VIXL_ASSERT(IsUint7(imm7)); + return imm7 << ImmHint_offset; + } + + static Instr CRm(int imm4) { + VIXL_ASSERT(IsUint4(imm4)); + return imm4 << CRm_offset; + } + + static Instr CRn(int imm4) { + 
VIXL_ASSERT(IsUint4(imm4)); + return imm4 << CRn_offset; + } + + static Instr SysOp(int imm14) { + VIXL_ASSERT(IsUint14(imm14)); + return imm14 << SysOp_offset; + } + + static Instr ImmSysOp1(int imm3) { + VIXL_ASSERT(IsUint3(imm3)); + return imm3 << SysOp1_offset; + } + + static Instr ImmSysOp2(int imm3) { + VIXL_ASSERT(IsUint3(imm3)); + return imm3 << SysOp2_offset; + } + + static Instr ImmBarrierDomain(int imm2) { + VIXL_ASSERT(IsUint2(imm2)); + return imm2 << ImmBarrierDomain_offset; + } + + static Instr ImmBarrierType(int imm2) { + VIXL_ASSERT(IsUint2(imm2)); + return imm2 << ImmBarrierType_offset; + } + + // Move immediates encoding. + static Instr ImmMoveWide(uint64_t imm) { + VIXL_ASSERT(IsUint16(imm)); + return static_cast(imm << ImmMoveWide_offset); + } + + static Instr ShiftMoveWide(int64_t shift) { + VIXL_ASSERT(IsUint2(shift)); + return static_cast(shift << ShiftMoveWide_offset); + } + + // FP Immediates. + static Instr ImmFP16(Float16 imm); + static Instr ImmFP32(float imm); + static Instr ImmFP64(double imm); + + // FP register type. + static Instr FPType(FPRegister fd) { + switch (fd.GetSizeInBits()) { + case 16: + return FP16; + case 32: + return FP32; + case 64: + return FP64; + default: + VIXL_UNREACHABLE(); + return 0; + } + } + + static Instr FPScale(unsigned scale) { + VIXL_ASSERT(IsUint6(scale)); + return scale << FPScale_offset; + } + + // Immediate field checking helpers. 
+ static bool IsImmAddSub(int64_t immediate); + static bool IsImmConditionalCompare(int64_t immediate); + static bool IsImmFP16(Float16 imm); + static bool IsImmFP32(float imm); + static bool IsImmFP64(double imm); + static bool IsImmLogical(uint64_t value, + unsigned width, + unsigned* n = NULL, + unsigned* imm_s = NULL, + unsigned* imm_r = NULL); + static bool IsImmLSPair(int64_t offset, unsigned access_size); + static bool IsImmLSScaled(int64_t offset, unsigned access_size); + static bool IsImmLSUnscaled(int64_t offset); + static bool IsImmMovn(uint64_t imm, unsigned reg_size); + static bool IsImmMovz(uint64_t imm, unsigned reg_size); + + // Instruction bits for vector format in data processing operations. + static Instr VFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.GetLanes()) { + case 2: + return NEON_2S; + case 4: + return NEON_4H; + case 8: + return NEON_8B; + default: + return 0xffffffff; + } + } else { + VIXL_ASSERT(vd.Is128Bits()); + switch (vd.GetLanes()) { + case 2: + return NEON_2D; + case 4: + return NEON_4S; + case 8: + return NEON_8H; + case 16: + return NEON_16B; + default: + return 0xffffffff; + } + } + } + + // Instruction bits for vector format in floating point data processing + // operations. + static Instr FPFormat(VRegister vd) { + switch (vd.GetLanes()) { + case 1: + // Floating point scalar formats. + switch (vd.GetSizeInBits()) { + case 16: + return FP16; + case 32: + return FP32; + case 64: + return FP64; + default: + VIXL_UNREACHABLE(); + } + break; + case 2: + // Two lane floating point vector formats. + switch (vd.GetSizeInBits()) { + case 64: + return NEON_FP_2S; + case 128: + return NEON_FP_2D; + default: + VIXL_UNREACHABLE(); + } + break; + case 4: + // Four lane floating point vector formats. + switch (vd.GetSizeInBits()) { + case 64: + return NEON_FP_4H; + case 128: + return NEON_FP_4S; + default: + VIXL_UNREACHABLE(); + } + break; + case 8: + // Eight lane floating point vector format. 
+ VIXL_ASSERT(vd.Is128Bits()); + return NEON_FP_8H; + default: + VIXL_UNREACHABLE(); + return 0; + } + VIXL_UNREACHABLE(); + return 0; + } + + // Instruction bits for vector format in load and store operations. + static Instr LSVFormat(VRegister vd) { + if (vd.Is64Bits()) { + switch (vd.GetLanes()) { + case 1: + return LS_NEON_1D; + case 2: + return LS_NEON_2S; + case 4: + return LS_NEON_4H; + case 8: + return LS_NEON_8B; + default: + return 0xffffffff; + } + } else { + VIXL_ASSERT(vd.Is128Bits()); + switch (vd.GetLanes()) { + case 2: + return LS_NEON_2D; + case 4: + return LS_NEON_4S; + case 8: + return LS_NEON_8H; + case 16: + return LS_NEON_16B; + default: + return 0xffffffff; + } + } + } + + // Instruction bits for scalar format in data processing operations. + static Instr SFormat(VRegister vd) { + VIXL_ASSERT(vd.GetLanes() == 1); + switch (vd.GetSizeInBytes()) { + case 1: + return NEON_B; + case 2: + return NEON_H; + case 4: + return NEON_S; + case 8: + return NEON_D; + default: + return 0xffffffff; + } + } + + static Instr ImmNEONHLM(int index, int num_bits) { + int h, l, m; + if (num_bits == 3) { + VIXL_ASSERT(IsUint3(index)); + h = (index >> 2) & 1; + l = (index >> 1) & 1; + m = (index >> 0) & 1; + } else if (num_bits == 2) { + VIXL_ASSERT(IsUint2(index)); + h = (index >> 1) & 1; + l = (index >> 0) & 1; + m = 0; + } else { + VIXL_ASSERT(IsUint1(index) && (num_bits == 1)); + h = (index >> 0) & 1; + l = 0; + m = 0; + } + return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset); + } + + static Instr ImmRotFcadd(int rot) { + VIXL_ASSERT(rot == 90 || rot == 270); + return (((rot == 270) ? 
1 : 0) << ImmRotFcadd_offset); + } + + static Instr ImmRotFcmlaSca(int rot) { + VIXL_ASSERT(rot == 0 || rot == 90 || rot == 180 || rot == 270); + return (rot / 90) << ImmRotFcmlaSca_offset; + } + + static Instr ImmRotFcmlaVec(int rot) { + VIXL_ASSERT(rot == 0 || rot == 90 || rot == 180 || rot == 270); + return (rot / 90) << ImmRotFcmlaVec_offset; + } + + static Instr ImmNEONExt(int imm4) { + VIXL_ASSERT(IsUint4(imm4)); + return imm4 << ImmNEONExt_offset; + } + + static Instr ImmNEON5(Instr format, int index) { + VIXL_ASSERT(IsUint4(index)); + int s = LaneSizeInBytesLog2FromFormat(static_cast(format)); + int imm5 = (index << (s + 1)) | (1 << s); + return imm5 << ImmNEON5_offset; + } + + static Instr ImmNEON4(Instr format, int index) { + VIXL_ASSERT(IsUint4(index)); + int s = LaneSizeInBytesLog2FromFormat(static_cast(format)); + int imm4 = index << s; + return imm4 << ImmNEON4_offset; + } + + static Instr ImmNEONabcdefgh(int imm8) { + VIXL_ASSERT(IsUint8(imm8)); + Instr instr; + instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset; + instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset; + return instr; + } + + static Instr NEONCmode(int cmode) { + VIXL_ASSERT(IsUint4(cmode)); + return cmode << NEONCmode_offset; + } + + static Instr NEONModImmOp(int op) { + VIXL_ASSERT(IsUint1(op)); + return op << NEONModImmOp_offset; + } + + // Size of the code generated since label to the current position. 
+ size_t GetSizeOfCodeGeneratedSince(Label* label) const { + VIXL_ASSERT(label->IsBound()); + return GetBuffer().GetOffsetFrom(label->GetLocation()); + } + VIXL_DEPRECATED("GetSizeOfCodeGeneratedSince", + size_t SizeOfCodeGeneratedSince(Label* label) const) { + return GetSizeOfCodeGeneratedSince(label); + } + + VIXL_DEPRECATED("GetBuffer().GetCapacity()", + size_t GetBufferCapacity() const) { + return GetBuffer().GetCapacity(); + } + VIXL_DEPRECATED("GetBuffer().GetCapacity()", size_t BufferCapacity() const) { + return GetBuffer().GetCapacity(); + } + + VIXL_DEPRECATED("GetBuffer().GetRemainingBytes()", + size_t GetRemainingBufferSpace() const) { + return GetBuffer().GetRemainingBytes(); + } + VIXL_DEPRECATED("GetBuffer().GetRemainingBytes()", + size_t RemainingBufferSpace() const) { + return GetBuffer().GetRemainingBytes(); + } + + PositionIndependentCodeOption GetPic() const { return pic_; } + VIXL_DEPRECATED("GetPic", PositionIndependentCodeOption pic() const) { + return GetPic(); + } + + CPUFeatures* GetCPUFeatures() { return &cpu_features_; } + + void SetCPUFeatures(const CPUFeatures& cpu_features) { + cpu_features_ = cpu_features; + } + + bool AllowPageOffsetDependentCode() const { + return (GetPic() == PageOffsetDependentCode) || + (GetPic() == PositionDependentCode); + } + + static Register AppropriateZeroRegFor(const CPURegister& reg) { + return reg.Is64Bits() ? 
Register(xzr) : Register(wzr); + } + + protected: + void LoadStore(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op, + LoadStoreScalingOption option = PreferScaledOffset); + + void LoadStorePAC(const Register& xt, + const MemOperand& addr, + LoadStorePACOp op); + + void LoadStorePair(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op); + void LoadStoreStruct(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreMultiStructOp op); + void LoadStoreStruct1(const VRegister& vt, + int reg_count, + const MemOperand& addr); + void LoadStoreStructSingle(const VRegister& vt, + uint32_t lane, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructSingleAllLanes(const VRegister& vt, + const MemOperand& addr, + NEONLoadStoreSingleStructOp op); + void LoadStoreStructVerify(const VRegister& vt, + const MemOperand& addr, + Instr op); + + void Prefetch(PrefetchOperation op, + const MemOperand& addr, + LoadStoreScalingOption option = PreferScaledOffset); + + // TODO(all): The third parameter should be passed by reference but gcc 4.8.2 + // reports a bogus uninitialised warning then. + void Logical(const Register& rd, + const Register& rn, + const Operand operand, + LogicalOp op); + void LogicalImmediate(const Register& rd, + const Register& rn, + unsigned n, + unsigned imm_s, + unsigned imm_r, + LogicalOp op); + + void ConditionalCompare(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op); + + void AddSubWithCarry(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op); + + + // Functions for emulating operands not directly supported by the instruction + // set. 
+ void EmitShift(const Register& rd, + const Register& rn, + Shift shift, + unsigned amount); + void EmitExtendShift(const Register& rd, + const Register& rn, + Extend extend, + unsigned left_shift); + + void AddSub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op); + + void NEONTable(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONTableOp op); + + // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified + // registers. Only simple loads are supported; sign- and zero-extension (such + // as in LDPSW_x or LDRB_w) are not supported. + static LoadStoreOp LoadOpFor(const CPURegister& rt); + static LoadStorePairOp LoadPairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStoreOp StoreOpFor(const CPURegister& rt); + static LoadStorePairOp StorePairOpFor(const CPURegister& rt, + const CPURegister& rt2); + static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor( + const CPURegister& rt, const CPURegister& rt2); + static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt); + + // Convenience pass-through for CPU feature checks. + bool CPUHas(CPUFeatures::Feature feature0, + CPUFeatures::Feature feature1 = CPUFeatures::kNone, + CPUFeatures::Feature feature2 = CPUFeatures::kNone, + CPUFeatures::Feature feature3 = CPUFeatures::kNone) const { + return cpu_features_.Has(feature0, feature1, feature2, feature3); + } + + // Determine whether the target CPU has the specified registers, based on the + // currently-enabled CPU features. Presence of a register does not imply + // support for arbitrary operations on it. For example, CPUs with FP have H + // registers, but most half-precision operations require the FPHalf feature. 
+ // + // These are used to check CPU features in loads and stores that have the same + // entry point for both integer and FP registers. + bool CPUHas(const CPURegister& rt) const; + bool CPUHas(const CPURegister& rt, const CPURegister& rt2) const; + + private: + static uint32_t FP16ToImm8(Float16 imm); + static uint32_t FP32ToImm8(float imm); + static uint32_t FP64ToImm8(double imm); + + // Instruction helpers. + void MoveWide(const Register& rd, + uint64_t imm, + int shift, + MoveWideImmediateOp mov_op); + void DataProcShiftedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void DataProcExtendedRegister(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + Instr op); + void LoadStorePairNonTemporal(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairNonTemporalOp op); + void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op); + void ConditionalSelect(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond, + ConditionalSelectOp op); + void DataProcessing1Source(const Register& rd, + const Register& rn, + DataProcessing1SourceOp op); + void DataProcessing3Source(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra, + DataProcessing3SourceOp op); + void FPDataProcessing1Source(const VRegister& fd, + const VRegister& fn, + FPDataProcessing1SourceOp op); + void FPDataProcessing3Source(const VRegister& fd, + const VRegister& fn, + const VRegister& fm, + const VRegister& fa, + FPDataProcessing3SourceOp op); + void NEONAcrossLanesL(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op); + void NEONAcrossLanes(const VRegister& vd, + const VRegister& vn, + NEONAcrossLanesOp op, + Instr op_half); + void NEONModifiedImmShiftLsl(const VRegister& vd, + const int imm8, + const int left_shift, + NEONModifiedImmediateOp op); + void NEONModifiedImmShiftMsl(const 
VRegister& vd, + const int imm8, + const int shift_amount, + NEONModifiedImmediateOp op); + void NEONFP2Same(const VRegister& vd, const VRegister& vn, Instr vop); + void NEON3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3SameOp vop); + void NEON3SameFP16(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op); + void NEONFP3Same(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Instr op); + void NEON3DifferentL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEON3DifferentW(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEON3DifferentHN(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEON3DifferentOp vop); + void NEONFP2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + double value = 0.0); + void NEONFP2RegMiscFP16(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscFP16Op vop, + double value = 0.0); + void NEON2RegMisc(const VRegister& vd, + const VRegister& vn, + NEON2RegMiscOp vop, + int value = 0); + void NEONFP2RegMisc(const VRegister& vd, const VRegister& vn, Instr op); + void NEONFP2RegMiscFP16(const VRegister& vd, const VRegister& vn, Instr op); + void NEONAddlp(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp op); + void NEONPerm(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + NEONPermOp op); + void NEONFPByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp op, + NEONByIndexedElementOp op_half); + void NEONByElement(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp op); + void NEONByElementL(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + NEONByIndexedElementOp op); + void NEONShiftImmediate(const VRegister& vd, + const VRegister& vn, + 
NEONShiftImmediateOp op, + int immh_immb); + void NEONShiftLeftImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONShiftRightImmediate(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONShiftImmediateL(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONShiftImmediateN(const VRegister& vd, + const VRegister& vn, + int shift, + NEONShiftImmediateOp op); + void NEONXtn(const VRegister& vd, const VRegister& vn, NEON2RegMiscOp vop); + + Instr LoadStoreStructAddrModeField(const MemOperand& addr); + + // Encode the specified MemOperand for the specified access size and scaling + // preference. + Instr LoadStoreMemOperand(const MemOperand& addr, + unsigned access_size, + LoadStoreScalingOption option); + + // Link the current (not-yet-emitted) instruction to the specified label, then + // return an offset to be encoded in the instruction. If the label is not yet + // bound, an offset of 0 is returned. + ptrdiff_t LinkAndGetByteOffsetTo(Label* label); + ptrdiff_t LinkAndGetInstructionOffsetTo(Label* label); + ptrdiff_t LinkAndGetPageOffsetTo(Label* label); + + // A common implementation for the LinkAndGetOffsetTo helpers. + template + ptrdiff_t LinkAndGetOffsetTo(Label* label); + + // Literal load offset are in words (32-bit). + ptrdiff_t LinkAndGetWordOffsetTo(RawLiteral* literal); + + // Emit the instruction in buffer_. 
+ void Emit(Instr instruction) { + VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize); + VIXL_ASSERT(AllowAssembler()); + GetBuffer()->Emit32(instruction); + } + + PositionIndependentCodeOption pic_; + + CPUFeatures cpu_features_; +}; + + +template +void Literal::UpdateValue(T new_value, const Assembler* assembler) { + return UpdateValue(new_value, + assembler->GetBuffer().GetStartAddress()); +} + + +template +void Literal::UpdateValue(T high64, T low64, const Assembler* assembler) { + return UpdateValue(high64, + low64, + assembler->GetBuffer().GetStartAddress()); +} + + +} // namespace aarch64 + +// Required InvalSet template specialisations. +// TODO: These template specialisations should not live in this file. Move +// Label out of the aarch64 namespace in order to share its implementation +// later. +#define INVAL_SET_TEMPLATE_PARAMETERS \ + ptrdiff_t, aarch64::Label::kNPreallocatedLinks, ptrdiff_t, \ + aarch64::Label::kInvalidLinkKey, aarch64::Label::kReclaimFrom, \ + aarch64::Label::kReclaimFactor +template <> +inline ptrdiff_t InvalSet::GetKey( + const ptrdiff_t& element) { + return element; +} +template <> +inline void InvalSet::SetKey(ptrdiff_t* element, + ptrdiff_t key) { + *element = key; +} +#undef INVAL_SET_TEMPLATE_PARAMETERS + +} // namespace vixl + +#endif // VIXL_AARCH64_ASSEMBLER_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/constants-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/constants-aarch64.h new file mode 100644 index 00000000..bbee5d82 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/constants-aarch64.h @@ -0,0 +1,2661 @@ +// Copyright 2015, VIXL authors +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH64_CONSTANTS_AARCH64_H_
+#define VIXL_AARCH64_CONSTANTS_AARCH64_H_
+
+#include "../globals-vixl.h"
+
+namespace vixl {
+namespace aarch64 {
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfVRegisters = 32;
+const unsigned kNumberOfFPRegisters = kNumberOfVRegisters;
+// Callee saved registers are x21-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 10;
+const int kFirstCalleeSavedRegisterIndex = 21;
+// Callee saved FP registers are d8-d15.
+const int kNumberOfCalleeSavedFPRegisters = 8; +const int kFirstCalleeSavedFPRegisterIndex = 8; + +// clang-format off +#define AARCH64_REGISTER_CODE_LIST(R) \ + R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \ + R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \ + R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \ + R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31) + +#define INSTRUCTION_FIELDS_LIST(V_) \ +/* Register fields */ \ +V_(Rd, 4, 0, ExtractBits) /* Destination register. */ \ +V_(Rn, 9, 5, ExtractBits) /* First source register. */ \ +V_(Rm, 20, 16, ExtractBits) /* Second source register. */ \ +V_(RmLow16, 19, 16, ExtractBits) /* Second source register (code 0-15). */ \ +V_(Ra, 14, 10, ExtractBits) /* Third source register. */ \ +V_(Rt, 4, 0, ExtractBits) /* Load/store register. */ \ +V_(Rt2, 14, 10, ExtractBits) /* Load/store second register. */ \ +V_(Rs, 20, 16, ExtractBits) /* Exclusive access status. */ \ + \ +/* Common bits */ \ +V_(SixtyFourBits, 31, 31, ExtractBits) \ +V_(FlagsUpdate, 29, 29, ExtractBits) \ + \ +/* PC relative addressing */ \ +V_(ImmPCRelHi, 23, 5, ExtractSignedBits) \ +V_(ImmPCRelLo, 30, 29, ExtractBits) \ + \ +/* Add/subtract/logical shift register */ \ +V_(ShiftDP, 23, 22, ExtractBits) \ +V_(ImmDPShift, 15, 10, ExtractBits) \ + \ +/* Add/subtract immediate */ \ +V_(ImmAddSub, 21, 10, ExtractBits) \ +V_(ShiftAddSub, 23, 22, ExtractBits) \ + \ +/* Add/substract extend */ \ +V_(ImmExtendShift, 12, 10, ExtractBits) \ +V_(ExtendMode, 15, 13, ExtractBits) \ + \ +/* Move wide */ \ +V_(ImmMoveWide, 20, 5, ExtractBits) \ +V_(ShiftMoveWide, 22, 21, ExtractBits) \ + \ +/* Logical immediate, bitfield and extract */ \ +V_(BitN, 22, 22, ExtractBits) \ +V_(ImmRotate, 21, 16, ExtractBits) \ +V_(ImmSetBits, 15, 10, ExtractBits) \ +V_(ImmR, 21, 16, ExtractBits) \ +V_(ImmS, 15, 10, ExtractBits) \ + \ +/* Test and branch immediate */ \ +V_(ImmTestBranch, 18, 5, ExtractSignedBits) \ +V_(ImmTestBranchBit40, 23, 19, ExtractBits) \ +V_(ImmTestBranchBit5, 31, 31, 
ExtractBits) \ + \ +/* Conditionals */ \ +V_(Condition, 15, 12, ExtractBits) \ +V_(ConditionBranch, 3, 0, ExtractBits) \ +V_(Nzcv, 3, 0, ExtractBits) \ +V_(ImmCondCmp, 20, 16, ExtractBits) \ +V_(ImmCondBranch, 23, 5, ExtractSignedBits) \ + \ +/* Floating point */ \ +V_(FPType, 23, 22, ExtractBits) \ +V_(ImmFP, 20, 13, ExtractBits) \ +V_(FPScale, 15, 10, ExtractBits) \ + \ +/* Load Store */ \ +V_(ImmLS, 20, 12, ExtractSignedBits) \ +V_(ImmLSUnsigned, 21, 10, ExtractBits) \ +V_(ImmLSPair, 21, 15, ExtractSignedBits) \ +V_(ImmShiftLS, 12, 12, ExtractBits) \ +V_(LSOpc, 23, 22, ExtractBits) \ +V_(LSVector, 26, 26, ExtractBits) \ +V_(LSSize, 31, 30, ExtractBits) \ +V_(ImmPrefetchOperation, 4, 0, ExtractBits) \ +V_(PrefetchHint, 4, 3, ExtractBits) \ +V_(PrefetchTarget, 2, 1, ExtractBits) \ +V_(PrefetchStream, 0, 0, ExtractBits) \ +V_(ImmLSPACHi, 22, 22, ExtractSignedBits) \ +V_(ImmLSPACLo, 20, 12, ExtractBits) \ + \ +/* Other immediates */ \ +V_(ImmUncondBranch, 25, 0, ExtractSignedBits) \ +V_(ImmCmpBranch, 23, 5, ExtractSignedBits) \ +V_(ImmLLiteral, 23, 5, ExtractSignedBits) \ +V_(ImmException, 20, 5, ExtractBits) \ +V_(ImmHint, 11, 5, ExtractBits) \ +V_(ImmBarrierDomain, 11, 10, ExtractBits) \ +V_(ImmBarrierType, 9, 8, ExtractBits) \ + \ +/* System (MRS, MSR, SYS) */ \ +V_(ImmSystemRegister, 20, 5, ExtractBits) \ +V_(SysO0, 19, 19, ExtractBits) \ +V_(SysOp, 18, 5, ExtractBits) \ +V_(SysOp0, 20, 19, ExtractBits) \ +V_(SysOp1, 18, 16, ExtractBits) \ +V_(SysOp2, 7, 5, ExtractBits) \ +V_(CRn, 15, 12, ExtractBits) \ +V_(CRm, 11, 8, ExtractBits) \ +V_(ImmRMIFRotation, 20, 15, ExtractBits) \ + \ +/* Load-/store-exclusive */ \ +V_(LdStXLoad, 22, 22, ExtractBits) \ +V_(LdStXNotExclusive, 23, 23, ExtractBits) \ +V_(LdStXAcquireRelease, 15, 15, ExtractBits) \ +V_(LdStXSizeLog2, 31, 30, ExtractBits) \ +V_(LdStXPair, 21, 21, ExtractBits) \ + \ +/* NEON generic fields */ \ +V_(NEONQ, 30, 30, ExtractBits) \ +V_(NEONSize, 23, 22, ExtractBits) \ +V_(NEONLSSize, 11, 10, ExtractBits) \ 
+V_(NEONS, 12, 12, ExtractBits) \ +V_(NEONL, 21, 21, ExtractBits) \ +V_(NEONM, 20, 20, ExtractBits) \ +V_(NEONH, 11, 11, ExtractBits) \ +V_(ImmNEONExt, 14, 11, ExtractBits) \ +V_(ImmNEON5, 20, 16, ExtractBits) \ +V_(ImmNEON4, 14, 11, ExtractBits) \ + \ +/* NEON extra fields */ \ +V_(ImmRotFcadd, 12, 12, ExtractBits) \ +V_(ImmRotFcmlaVec, 12, 11, ExtractBits) \ +V_(ImmRotFcmlaSca, 14, 13, ExtractBits) \ + \ +/* NEON Modified Immediate fields */ \ +V_(ImmNEONabc, 18, 16, ExtractBits) \ +V_(ImmNEONdefgh, 9, 5, ExtractBits) \ +V_(NEONModImmOp, 29, 29, ExtractBits) \ +V_(NEONCmode, 15, 12, ExtractBits) \ + \ +/* NEON Shift Immediate fields */ \ +V_(ImmNEONImmhImmb, 22, 16, ExtractBits) \ +V_(ImmNEONImmh, 22, 19, ExtractBits) \ +V_(ImmNEONImmb, 18, 16, ExtractBits) +// clang-format on + +#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ + /* NZCV */ \ + V_(Flags, 31, 28, ExtractBits) \ + V_(N, 31, 31, ExtractBits) \ + V_(Z, 30, 30, ExtractBits) \ + V_(C, 29, 29, ExtractBits) \ + V_(V, 28, 28, ExtractBits) \ + M_(NZCV, Flags_mask) \ + /* FPCR */ \ + V_(AHP, 26, 26, ExtractBits) \ + V_(DN, 25, 25, ExtractBits) \ + V_(FZ, 24, 24, ExtractBits) \ + V_(RMode, 23, 22, ExtractBits) \ + M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask) + +// Fields offsets. +#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \ + const int Name##_offset = LowBit; \ + const int Name##_width = HighBit - LowBit + 1; \ + const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit; +#define NOTHING(A, B) +INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS) +SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING) +#undef NOTHING +#undef DECLARE_FIELDS_BITS + +// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed +// from ImmPCRelLo and ImmPCRelHi. +const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask; + +// Disable `clang-format` for the `enum`s below. We care about the manual +// formatting that `clang-format` would destroy. 
+// clang-format off
+
+// Condition codes.
+enum Condition {
+  eq = 0,   // Z set            Equal.
+  ne = 1,   // Z clear          Not equal.
+  cs = 2,   // C set            Carry set.
+  cc = 3,   // C clear          Carry clear.
+  mi = 4,   // N set            Negative.
+  pl = 5,   // N clear          Positive or zero.
+  vs = 6,   // V set            Overflow.
+  vc = 7,   // V clear          No overflow.
+  hi = 8,   // C set, Z clear   Unsigned higher.
+  ls = 9,   // C clear or Z set Unsigned lower or same.
+  ge = 10,  // N == V           Greater or equal.
+  lt = 11,  // N != V           Less than.
+  gt = 12,  // Z clear, N == V  Greater than.
+  le = 13,  // Z set or N != V  Less than or equal.
+  al = 14,  //                  Always.
+  nv = 15,  // Behaves as always/al.
+
+  // Aliases.
+  hs = cs,  // C set            Unsigned higher or same.
+  lo = cc   // C clear          Unsigned lower.
+};
+
+inline Condition InvertCondition(Condition cond) {
+  // Conditions al and nv behave identically, as "always true". They can't be
+  // inverted, because there is no "always false" condition.
+  VIXL_ASSERT((cond != al) && (cond != nv));
+  return static_cast<Condition>(cond ^ 1);
+}
+
+enum FPTrapFlags {
+  EnableTrap = 1,
+  DisableTrap = 0
+};
+
+enum FlagsUpdate {
+  SetFlags = 1,
+  LeaveFlags = 0
+};
+
+enum StatusFlags {
+  NoFlag = 0,
+
+  // Derive the flag combinations from the system register bit descriptions.
+  NFlag = N_mask,
+  ZFlag = Z_mask,
+  CFlag = C_mask,
+  VFlag = V_mask,
+  NZFlag = NFlag | ZFlag,
+  NCFlag = NFlag | CFlag,
+  NVFlag = NFlag | VFlag,
+  ZCFlag = ZFlag | CFlag,
+  ZVFlag = ZFlag | VFlag,
+  CVFlag = CFlag | VFlag,
+  NZCFlag = NFlag | ZFlag | CFlag,
+  NZVFlag = NFlag | ZFlag | VFlag,
+  NCVFlag = NFlag | CFlag | VFlag,
+  ZCVFlag = ZFlag | CFlag | VFlag,
+  NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+  // Floating-point comparison results.
+ FPEqualFlag = ZCFlag, + FPLessThanFlag = NFlag, + FPGreaterThanFlag = CFlag, + FPUnorderedFlag = CVFlag +}; + +enum Shift { + NO_SHIFT = -1, + LSL = 0x0, + LSR = 0x1, + ASR = 0x2, + ROR = 0x3, + MSL = 0x4 +}; + +enum Extend { + NO_EXTEND = -1, + UXTB = 0, + UXTH = 1, + UXTW = 2, + UXTX = 3, + SXTB = 4, + SXTH = 5, + SXTW = 6, + SXTX = 7 +}; + +enum SystemHint { + NOP = 0, + YIELD = 1, + WFE = 2, + WFI = 3, + SEV = 4, + SEVL = 5, + ESB = 16, + CSDB = 20, + BTI = 32, + BTI_c = 34, + BTI_j = 36, + BTI_jc = 38 +}; + +enum BranchTargetIdentifier { + EmitBTI_none = NOP, + EmitBTI = BTI, + EmitBTI_c = BTI_c, + EmitBTI_j = BTI_j, + EmitBTI_jc = BTI_jc, + + // These correspond to the values of the CRm:op2 fields in the equivalent HINT + // instruction. + EmitPACIASP = 25, + EmitPACIBSP = 27 +}; + +enum BarrierDomain { + OuterShareable = 0, + NonShareable = 1, + InnerShareable = 2, + FullSystem = 3 +}; + +enum BarrierType { + BarrierOther = 0, + BarrierReads = 1, + BarrierWrites = 2, + BarrierAll = 3 +}; + +enum PrefetchOperation { + PLDL1KEEP = 0x00, + PLDL1STRM = 0x01, + PLDL2KEEP = 0x02, + PLDL2STRM = 0x03, + PLDL3KEEP = 0x04, + PLDL3STRM = 0x05, + + PLIL1KEEP = 0x08, + PLIL1STRM = 0x09, + PLIL2KEEP = 0x0a, + PLIL2STRM = 0x0b, + PLIL3KEEP = 0x0c, + PLIL3STRM = 0x0d, + + PSTL1KEEP = 0x10, + PSTL1STRM = 0x11, + PSTL2KEEP = 0x12, + PSTL2STRM = 0x13, + PSTL3KEEP = 0x14, + PSTL3STRM = 0x15 +}; + +enum BType { + // Set when executing any instruction on a guarded page, except those cases + // listed below. + DefaultBType = 0, + + // Set when an indirect branch is taken from an unguarded page to a guarded + // page, or from a guarded page to ip0 or ip1 (x16 or x17), eg "br ip0". + BranchFromUnguardedOrToIP = 1, + + // Set when an indirect branch and link (call) is taken, eg. "blr x0". + BranchAndLink = 2, + + // Set when an indirect branch is taken from a guarded page to a register + // that is not ip0 or ip1 (x16 or x17), eg, "br x0". 
+  BranchFromGuardedNotToIP = 3
+};
+
+template <int op0, int op1, int crn, int crm, int op2>
+class SystemRegisterEncoder {
+ public:
+  static const uint32_t value =
+      ((op0 << SysO0_offset) |
+       (op1 << SysOp1_offset) |
+       (crn << CRn_offset) |
+       (crm << CRm_offset) |
+       (op2 << SysOp2_offset)) >> ImmSystemRegister_offset;
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+  NZCV = SystemRegisterEncoder<3, 3, 4, 2, 0>::value,
+  FPCR = SystemRegisterEncoder<3, 3, 4, 4, 0>::value
+};
+
+template <int op1, int crn, int crm, int op2>
+class CacheOpEncoder {
+ public:
+  static const uint32_t value =
+      ((op1 << SysOp1_offset) |
+       (crn << CRn_offset) |
+       (crm << CRm_offset) |
+       (op2 << SysOp2_offset)) >> SysOp_offset;
+};
+
+enum InstructionCacheOp {
+  IVAU = CacheOpEncoder<3, 7, 5, 1>::value
+};
+
+enum DataCacheOp {
+  CVAC = CacheOpEncoder<3, 7, 10, 1>::value,
+  CVAU = CacheOpEncoder<3, 7, 11, 1>::value,
+  CVAP = CacheOpEncoder<3, 7, 12, 1>::value,
+  CIVAC = CacheOpEncoder<3, 7, 14, 1>::value,
+  ZVA = CacheOpEncoder<3, 7, 4, 1>::value
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField { + SixtyFourBits = 0x80000000, + ThirtyTwoBits = 0x00000000, + + FPTypeMask = 0x00C00000, + FP16 = 0x00C00000, + FP32 = 0x00000000, + FP64 = 0x00400000 +}; + +enum NEONFormatField { + NEONFormatFieldMask = 0x40C00000, + NEON_Q = 0x40000000, + NEON_8B = 0x00000000, + NEON_16B = NEON_8B | NEON_Q, + NEON_4H = 0x00400000, + NEON_8H = NEON_4H | NEON_Q, + NEON_2S = 0x00800000, + NEON_4S = NEON_2S | NEON_Q, + NEON_1D = 0x00C00000, + NEON_2D = 0x00C00000 | NEON_Q +}; + +enum NEONFPFormatField { + NEONFPFormatFieldMask = 0x40400000, + NEON_FP_4H = FP16, + NEON_FP_2S = FP32, + NEON_FP_8H = FP16 | NEON_Q, + NEON_FP_4S = FP32 | NEON_Q, + NEON_FP_2D = FP64 | NEON_Q +}; + +enum NEONLSFormatField { + NEONLSFormatFieldMask = 0x40000C00, + LS_NEON_8B = 0x00000000, + LS_NEON_16B = LS_NEON_8B | NEON_Q, + LS_NEON_4H = 0x00000400, + LS_NEON_8H = LS_NEON_4H | NEON_Q, + LS_NEON_2S = 0x00000800, + LS_NEON_4S = LS_NEON_2S | NEON_Q, + LS_NEON_1D = 0x00000C00, + LS_NEON_2D = LS_NEON_1D | NEON_Q +}; + +enum NEONScalarFormatField { + NEONScalarFormatFieldMask = 0x00C00000, + NEONScalar = 0x10000000, + NEON_B = 0x00000000, + NEON_H = 0x00400000, + NEON_S = 0x00800000, + NEON_D = 0x00C00000 +}; + +// PC relative addressing. +enum PCRelAddressingOp { + PCRelAddressingFixed = 0x10000000, + PCRelAddressingFMask = 0x1F000000, + PCRelAddressingMask = 0x9F000000, + ADR = PCRelAddressingFixed | 0x00000000, + ADRP = PCRelAddressingFixed | 0x80000000 +}; + +// Add/sub (immediate, shifted and extended.) 
+const int kSFOffset = 31; +enum AddSubOp { + AddSubOpMask = 0x60000000, + AddSubSetFlagsBit = 0x20000000, + ADD = 0x00000000, + ADDS = ADD | AddSubSetFlagsBit, + SUB = 0x40000000, + SUBS = SUB | AddSubSetFlagsBit +}; + +#define ADD_SUB_OP_LIST(V) \ + V(ADD), \ + V(ADDS), \ + V(SUB), \ + V(SUBS) + +enum AddSubImmediateOp { + AddSubImmediateFixed = 0x11000000, + AddSubImmediateFMask = 0x1F000000, + AddSubImmediateMask = 0xFF000000, + #define ADD_SUB_IMMEDIATE(A) \ + A##_w_imm = AddSubImmediateFixed | A, \ + A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE) + #undef ADD_SUB_IMMEDIATE +}; + +enum AddSubShiftedOp { + AddSubShiftedFixed = 0x0B000000, + AddSubShiftedFMask = 0x1F200000, + AddSubShiftedMask = 0xFF200000, + #define ADD_SUB_SHIFTED(A) \ + A##_w_shift = AddSubShiftedFixed | A, \ + A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_SHIFTED) + #undef ADD_SUB_SHIFTED +}; + +enum AddSubExtendedOp { + AddSubExtendedFixed = 0x0B200000, + AddSubExtendedFMask = 0x1F200000, + AddSubExtendedMask = 0xFFE00000, + #define ADD_SUB_EXTENDED(A) \ + A##_w_ext = AddSubExtendedFixed | A, \ + A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits + ADD_SUB_OP_LIST(ADD_SUB_EXTENDED) + #undef ADD_SUB_EXTENDED +}; + +// Add/sub with carry. +enum AddSubWithCarryOp { + AddSubWithCarryFixed = 0x1A000000, + AddSubWithCarryFMask = 0x1FE00000, + AddSubWithCarryMask = 0xFFE0FC00, + ADC_w = AddSubWithCarryFixed | ADD, + ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits, + ADC = ADC_w, + ADCS_w = AddSubWithCarryFixed | ADDS, + ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits, + SBC_w = AddSubWithCarryFixed | SUB, + SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits, + SBC = SBC_w, + SBCS_w = AddSubWithCarryFixed | SUBS, + SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits +}; + +// Rotate right into flags. 
+enum RotateRightIntoFlagsOp { + RotateRightIntoFlagsFixed = 0x1A000400, + RotateRightIntoFlagsFMask = 0x1FE07C00, + RotateRightIntoFlagsMask = 0xFFE07C10, + RMIF = RotateRightIntoFlagsFixed | 0xA0000000 +}; + +// Evaluate into flags. +enum EvaluateIntoFlagsOp { + EvaluateIntoFlagsFixed = 0x1A000800, + EvaluateIntoFlagsFMask = 0x1FE03C00, + EvaluateIntoFlagsMask = 0xFFE07C1F, + SETF8 = EvaluateIntoFlagsFixed | 0x2000000D, + SETF16 = EvaluateIntoFlagsFixed | 0x2000400D +}; + + +// Logical (immediate and shifted register). +enum LogicalOp { + LogicalOpMask = 0x60200000, + NOT = 0x00200000, + AND = 0x00000000, + BIC = AND | NOT, + ORR = 0x20000000, + ORN = ORR | NOT, + EOR = 0x40000000, + EON = EOR | NOT, + ANDS = 0x60000000, + BICS = ANDS | NOT +}; + +// Logical immediate. +enum LogicalImmediateOp { + LogicalImmediateFixed = 0x12000000, + LogicalImmediateFMask = 0x1F800000, + LogicalImmediateMask = 0xFF800000, + AND_w_imm = LogicalImmediateFixed | AND, + AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits, + ORR_w_imm = LogicalImmediateFixed | ORR, + ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits, + EOR_w_imm = LogicalImmediateFixed | EOR, + EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits, + ANDS_w_imm = LogicalImmediateFixed | ANDS, + ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits +}; + +// Logical shifted register. 
+enum LogicalShiftedOp { + LogicalShiftedFixed = 0x0A000000, + LogicalShiftedFMask = 0x1F000000, + LogicalShiftedMask = 0xFF200000, + AND_w = LogicalShiftedFixed | AND, + AND_x = LogicalShiftedFixed | AND | SixtyFourBits, + AND_shift = AND_w, + BIC_w = LogicalShiftedFixed | BIC, + BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits, + BIC_shift = BIC_w, + ORR_w = LogicalShiftedFixed | ORR, + ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits, + ORR_shift = ORR_w, + ORN_w = LogicalShiftedFixed | ORN, + ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits, + ORN_shift = ORN_w, + EOR_w = LogicalShiftedFixed | EOR, + EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits, + EOR_shift = EOR_w, + EON_w = LogicalShiftedFixed | EON, + EON_x = LogicalShiftedFixed | EON | SixtyFourBits, + EON_shift = EON_w, + ANDS_w = LogicalShiftedFixed | ANDS, + ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits, + ANDS_shift = ANDS_w, + BICS_w = LogicalShiftedFixed | BICS, + BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits, + BICS_shift = BICS_w +}; + +// Move wide immediate. +enum MoveWideImmediateOp { + MoveWideImmediateFixed = 0x12800000, + MoveWideImmediateFMask = 0x1F800000, + MoveWideImmediateMask = 0xFF800000, + MOVN = 0x00000000, + MOVZ = 0x40000000, + MOVK = 0x60000000, + MOVN_w = MoveWideImmediateFixed | MOVN, + MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits, + MOVZ_w = MoveWideImmediateFixed | MOVZ, + MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits, + MOVK_w = MoveWideImmediateFixed | MOVK, + MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits +}; + +// Bitfield. 
+const int kBitfieldNOffset = 22; +enum BitfieldOp { + BitfieldFixed = 0x13000000, + BitfieldFMask = 0x1F800000, + BitfieldMask = 0xFF800000, + SBFM_w = BitfieldFixed | 0x00000000, + SBFM_x = BitfieldFixed | 0x80000000, + SBFM = SBFM_w, + BFM_w = BitfieldFixed | 0x20000000, + BFM_x = BitfieldFixed | 0xA0000000, + BFM = BFM_w, + UBFM_w = BitfieldFixed | 0x40000000, + UBFM_x = BitfieldFixed | 0xC0000000, + UBFM = UBFM_w + // Bitfield N field. +}; + +// Extract. +enum ExtractOp { + ExtractFixed = 0x13800000, + ExtractFMask = 0x1F800000, + ExtractMask = 0xFFA00000, + EXTR_w = ExtractFixed | 0x00000000, + EXTR_x = ExtractFixed | 0x80000000, + EXTR = EXTR_w +}; + +// Unconditional branch. +enum UnconditionalBranchOp { + UnconditionalBranchFixed = 0x14000000, + UnconditionalBranchFMask = 0x7C000000, + UnconditionalBranchMask = 0xFC000000, + B = UnconditionalBranchFixed | 0x00000000, + BL = UnconditionalBranchFixed | 0x80000000 +}; + +// Unconditional branch to register. +enum UnconditionalBranchToRegisterOp { + UnconditionalBranchToRegisterFixed = 0xD6000000, + UnconditionalBranchToRegisterFMask = 0xFE000000, + UnconditionalBranchToRegisterMask = 0xFFFFFC00, + BR = UnconditionalBranchToRegisterFixed | 0x001F0000, + BLR = UnconditionalBranchToRegisterFixed | 0x003F0000, + RET = UnconditionalBranchToRegisterFixed | 0x005F0000, + + BRAAZ = UnconditionalBranchToRegisterFixed | 0x001F0800, + BRABZ = UnconditionalBranchToRegisterFixed | 0x001F0C00, + BLRAAZ = UnconditionalBranchToRegisterFixed | 0x003F0800, + BLRABZ = UnconditionalBranchToRegisterFixed | 0x003F0C00, + RETAA = UnconditionalBranchToRegisterFixed | 0x005F0800, + RETAB = UnconditionalBranchToRegisterFixed | 0x005F0C00, + BRAA = UnconditionalBranchToRegisterFixed | 0x011F0800, + BRAB = UnconditionalBranchToRegisterFixed | 0x011F0C00, + BLRAA = UnconditionalBranchToRegisterFixed | 0x013F0800, + BLRAB = UnconditionalBranchToRegisterFixed | 0x013F0C00 +}; + +// Compare and branch. 
+enum CompareBranchOp { + CompareBranchFixed = 0x34000000, + CompareBranchFMask = 0x7E000000, + CompareBranchMask = 0xFF000000, + CBZ_w = CompareBranchFixed | 0x00000000, + CBZ_x = CompareBranchFixed | 0x80000000, + CBZ = CBZ_w, + CBNZ_w = CompareBranchFixed | 0x01000000, + CBNZ_x = CompareBranchFixed | 0x81000000, + CBNZ = CBNZ_w +}; + +// Test and branch. +enum TestBranchOp { + TestBranchFixed = 0x36000000, + TestBranchFMask = 0x7E000000, + TestBranchMask = 0x7F000000, + TBZ = TestBranchFixed | 0x00000000, + TBNZ = TestBranchFixed | 0x01000000 +}; + +// Conditional branch. +enum ConditionalBranchOp { + ConditionalBranchFixed = 0x54000000, + ConditionalBranchFMask = 0xFE000000, + ConditionalBranchMask = 0xFF000010, + B_cond = ConditionalBranchFixed | 0x00000000 +}; + +// System. +// System instruction encoding is complicated because some instructions use op +// and CR fields to encode parameters. To handle this cleanly, the system +// instructions are split into more than one enum. + +enum SystemOp { + SystemFixed = 0xD5000000, + SystemFMask = 0xFFC00000 +}; + +enum SystemSysRegOp { + SystemSysRegFixed = 0xD5100000, + SystemSysRegFMask = 0xFFD00000, + SystemSysRegMask = 0xFFF00000, + MRS = SystemSysRegFixed | 0x00200000, + MSR = SystemSysRegFixed | 0x00000000 +}; + +enum SystemPStateOp { + SystemPStateFixed = 0xD5004000, + SystemPStateFMask = 0xFFF8F000, + SystemPStateMask = 0xFFFFF0FF, + CFINV = SystemPStateFixed | 0x0000001F, + XAFLAG = SystemPStateFixed | 0x0000003F, + AXFLAG = SystemPStateFixed | 0x0000005F +}; + +enum SystemHintOp { + SystemHintFixed = 0xD503201F, + SystemHintFMask = 0xFFFFF01F, + SystemHintMask = 0xFFFFF01F, + HINT = SystemHintFixed | 0x00000000 +}; + +enum SystemSysOp { + SystemSysFixed = 0xD5080000, + SystemSysFMask = 0xFFF80000, + SystemSysMask = 0xFFF80000, + SYS = SystemSysFixed | 0x00000000 +}; + +// Exception. 
+enum ExceptionOp { + ExceptionFixed = 0xD4000000, + ExceptionFMask = 0xFF000000, + ExceptionMask = 0xFFE0001F, + HLT = ExceptionFixed | 0x00400000, + BRK = ExceptionFixed | 0x00200000, + SVC = ExceptionFixed | 0x00000001, + HVC = ExceptionFixed | 0x00000002, + SMC = ExceptionFixed | 0x00000003, + DCPS1 = ExceptionFixed | 0x00A00001, + DCPS2 = ExceptionFixed | 0x00A00002, + DCPS3 = ExceptionFixed | 0x00A00003 +}; + +enum MemBarrierOp { + MemBarrierFixed = 0xD503309F, + MemBarrierFMask = 0xFFFFF09F, + MemBarrierMask = 0xFFFFF0FF, + DSB = MemBarrierFixed | 0x00000000, + DMB = MemBarrierFixed | 0x00000020, + ISB = MemBarrierFixed | 0x00000040 +}; + +enum SystemExclusiveMonitorOp { + SystemExclusiveMonitorFixed = 0xD503305F, + SystemExclusiveMonitorFMask = 0xFFFFF0FF, + SystemExclusiveMonitorMask = 0xFFFFF0FF, + CLREX = SystemExclusiveMonitorFixed +}; + +enum SystemPAuthOp { + SystemPAuthFixed = 0xD503211F, + SystemPAuthFMask = 0xFFFFFD1F, + SystemPAuthMask = 0xFFFFFFFF, + PACIA1716 = SystemPAuthFixed | 0x00000100, + PACIB1716 = SystemPAuthFixed | 0x00000140, + AUTIA1716 = SystemPAuthFixed | 0x00000180, + AUTIB1716 = SystemPAuthFixed | 0x000001C0, + PACIAZ = SystemPAuthFixed | 0x00000300, + PACIASP = SystemPAuthFixed | 0x00000320, + PACIBZ = SystemPAuthFixed | 0x00000340, + PACIBSP = SystemPAuthFixed | 0x00000360, + AUTIAZ = SystemPAuthFixed | 0x00000380, + AUTIASP = SystemPAuthFixed | 0x000003A0, + AUTIBZ = SystemPAuthFixed | 0x000003C0, + AUTIBSP = SystemPAuthFixed | 0x000003E0, + + // XPACLRI has the same fixed mask as System Hints and needs to be handled + // differently. + XPACLRI = 0xD50320FF +}; + +// Any load or store. +enum LoadStoreAnyOp { + LoadStoreAnyFMask = 0x0a000000, + LoadStoreAnyFixed = 0x08000000 +}; + +// Any load pair or store pair. 
+enum LoadStorePairAnyOp { + LoadStorePairAnyFMask = 0x3a000000, + LoadStorePairAnyFixed = 0x28000000 +}; + +#define LOAD_STORE_PAIR_OP_LIST(V) \ + V(STP, w, 0x00000000), \ + V(LDP, w, 0x00400000), \ + V(LDPSW, x, 0x40400000), \ + V(STP, x, 0x80000000), \ + V(LDP, x, 0x80400000), \ + V(STP, s, 0x04000000), \ + V(LDP, s, 0x04400000), \ + V(STP, d, 0x44000000), \ + V(LDP, d, 0x44400000), \ + V(STP, q, 0x84000000), \ + V(LDP, q, 0x84400000) + +// Load/store pair (post, pre and offset.) +enum LoadStorePairOp { + LoadStorePairMask = 0xC4400000, + LoadStorePairLBit = 1 << 22, + #define LOAD_STORE_PAIR(A, B, C) \ + A##_##B = C + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR) + #undef LOAD_STORE_PAIR +}; + +enum LoadStorePairPostIndexOp { + LoadStorePairPostIndexFixed = 0x28800000, + LoadStorePairPostIndexFMask = 0x3B800000, + LoadStorePairPostIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \ + A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX) + #undef LOAD_STORE_PAIR_POST_INDEX +}; + +enum LoadStorePairPreIndexOp { + LoadStorePairPreIndexFixed = 0x29800000, + LoadStorePairPreIndexFMask = 0x3B800000, + LoadStorePairPreIndexMask = 0xFFC00000, + #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \ + A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX) + #undef LOAD_STORE_PAIR_PRE_INDEX +}; + +enum LoadStorePairOffsetOp { + LoadStorePairOffsetFixed = 0x29000000, + LoadStorePairOffsetFMask = 0x3B800000, + LoadStorePairOffsetMask = 0xFFC00000, + #define LOAD_STORE_PAIR_OFFSET(A, B, C) \ + A##_##B##_off = LoadStorePairOffsetFixed | A##_##B + LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET) + #undef LOAD_STORE_PAIR_OFFSET +}; + +enum LoadStorePairNonTemporalOp { + LoadStorePairNonTemporalFixed = 0x28000000, + LoadStorePairNonTemporalFMask = 0x3B800000, + LoadStorePairNonTemporalMask = 0xFFC00000, + LoadStorePairNonTemporalLBit = 1 << 22, + STNP_w = 
LoadStorePairNonTemporalFixed | STP_w, + LDNP_w = LoadStorePairNonTemporalFixed | LDP_w, + STNP_x = LoadStorePairNonTemporalFixed | STP_x, + LDNP_x = LoadStorePairNonTemporalFixed | LDP_x, + STNP_s = LoadStorePairNonTemporalFixed | STP_s, + LDNP_s = LoadStorePairNonTemporalFixed | LDP_s, + STNP_d = LoadStorePairNonTemporalFixed | STP_d, + LDNP_d = LoadStorePairNonTemporalFixed | LDP_d, + STNP_q = LoadStorePairNonTemporalFixed | STP_q, + LDNP_q = LoadStorePairNonTemporalFixed | LDP_q +}; + +// Load with pointer authentication. +enum LoadStorePACOp { + LoadStorePACFixed = 0xF8200400, + LoadStorePACFMask = 0xFF200400, + LoadStorePACMask = 0xFFA00C00, + LoadStorePACPreBit = 0x00000800, + LDRAA = LoadStorePACFixed | 0x00000000, + LDRAA_pre = LoadStorePACPreBit | LDRAA, + LDRAB = LoadStorePACFixed | 0x00800000, + LDRAB_pre = LoadStorePACPreBit | LDRAB +}; + +// Load literal. +enum LoadLiteralOp { + LoadLiteralFixed = 0x18000000, + LoadLiteralFMask = 0x3B000000, + LoadLiteralMask = 0xFF000000, + LDR_w_lit = LoadLiteralFixed | 0x00000000, + LDR_x_lit = LoadLiteralFixed | 0x40000000, + LDRSW_x_lit = LoadLiteralFixed | 0x80000000, + PRFM_lit = LoadLiteralFixed | 0xC0000000, + LDR_s_lit = LoadLiteralFixed | 0x04000000, + LDR_d_lit = LoadLiteralFixed | 0x44000000, + LDR_q_lit = LoadLiteralFixed | 0x84000000 +}; + +#define LOAD_STORE_OP_LIST(V) \ + V(ST, RB, w, 0x00000000), \ + V(ST, RH, w, 0x40000000), \ + V(ST, R, w, 0x80000000), \ + V(ST, R, x, 0xC0000000), \ + V(LD, RB, w, 0x00400000), \ + V(LD, RH, w, 0x40400000), \ + V(LD, R, w, 0x80400000), \ + V(LD, R, x, 0xC0400000), \ + V(LD, RSB, x, 0x00800000), \ + V(LD, RSH, x, 0x40800000), \ + V(LD, RSW, x, 0x80800000), \ + V(LD, RSB, w, 0x00C00000), \ + V(LD, RSH, w, 0x40C00000), \ + V(ST, R, b, 0x04000000), \ + V(ST, R, h, 0x44000000), \ + V(ST, R, s, 0x84000000), \ + V(ST, R, d, 0xC4000000), \ + V(ST, R, q, 0x04800000), \ + V(LD, R, b, 0x04400000), \ + V(LD, R, h, 0x44400000), \ + V(LD, R, s, 0x84400000), \ + V(LD, R, d, 
0xC4400000), \ + V(LD, R, q, 0x04C00000) + +// Load/store (post, pre, offset and unsigned.) +enum LoadStoreOp { + LoadStoreMask = 0xC4C00000, + LoadStoreVMask = 0x04000000, + #define LOAD_STORE(A, B, C, D) \ + A##B##_##C = D + LOAD_STORE_OP_LIST(LOAD_STORE), + #undef LOAD_STORE + PRFM = 0xC0800000 +}; + +// Load/store unscaled offset. +enum LoadStoreUnscaledOffsetOp { + LoadStoreUnscaledOffsetFixed = 0x38000000, + LoadStoreUnscaledOffsetFMask = 0x3B200C00, + LoadStoreUnscaledOffsetMask = 0xFFE00C00, + PRFUM = LoadStoreUnscaledOffsetFixed | PRFM, + #define LOAD_STORE_UNSCALED(A, B, C, D) \ + A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED) + #undef LOAD_STORE_UNSCALED +}; + +// Load/store post index. +enum LoadStorePostIndex { + LoadStorePostIndexFixed = 0x38000400, + LoadStorePostIndexFMask = 0x3B200C00, + LoadStorePostIndexMask = 0xFFE00C00, + #define LOAD_STORE_POST_INDEX(A, B, C, D) \ + A##B##_##C##_post = LoadStorePostIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX) + #undef LOAD_STORE_POST_INDEX +}; + +// Load/store pre index. +enum LoadStorePreIndex { + LoadStorePreIndexFixed = 0x38000C00, + LoadStorePreIndexFMask = 0x3B200C00, + LoadStorePreIndexMask = 0xFFE00C00, + #define LOAD_STORE_PRE_INDEX(A, B, C, D) \ + A##B##_##C##_pre = LoadStorePreIndexFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX) + #undef LOAD_STORE_PRE_INDEX +}; + +// Load/store unsigned offset. +enum LoadStoreUnsignedOffset { + LoadStoreUnsignedOffsetFixed = 0x39000000, + LoadStoreUnsignedOffsetFMask = 0x3B000000, + LoadStoreUnsignedOffsetMask = 0xFFC00000, + PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM, + #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \ + A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET) + #undef LOAD_STORE_UNSIGNED_OFFSET +}; + +// Load/store register offset. 
+enum LoadStoreRegisterOffset { + LoadStoreRegisterOffsetFixed = 0x38200800, + LoadStoreRegisterOffsetFMask = 0x3B200C00, + LoadStoreRegisterOffsetMask = 0xFFE00C00, + PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM, + #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \ + A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D + LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET) + #undef LOAD_STORE_REGISTER_OFFSET +}; + +enum LoadStoreExclusive { + LoadStoreExclusiveFixed = 0x08000000, + LoadStoreExclusiveFMask = 0x3F000000, + LoadStoreExclusiveMask = 0xFFE08000, + STXRB_w = LoadStoreExclusiveFixed | 0x00000000, + STXRH_w = LoadStoreExclusiveFixed | 0x40000000, + STXR_w = LoadStoreExclusiveFixed | 0x80000000, + STXR_x = LoadStoreExclusiveFixed | 0xC0000000, + LDXRB_w = LoadStoreExclusiveFixed | 0x00400000, + LDXRH_w = LoadStoreExclusiveFixed | 0x40400000, + LDXR_w = LoadStoreExclusiveFixed | 0x80400000, + LDXR_x = LoadStoreExclusiveFixed | 0xC0400000, + STXP_w = LoadStoreExclusiveFixed | 0x80200000, + STXP_x = LoadStoreExclusiveFixed | 0xC0200000, + LDXP_w = LoadStoreExclusiveFixed | 0x80600000, + LDXP_x = LoadStoreExclusiveFixed | 0xC0600000, + STLXRB_w = LoadStoreExclusiveFixed | 0x00008000, + STLXRH_w = LoadStoreExclusiveFixed | 0x40008000, + STLXR_w = LoadStoreExclusiveFixed | 0x80008000, + STLXR_x = LoadStoreExclusiveFixed | 0xC0008000, + LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000, + LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000, + LDAXR_w = LoadStoreExclusiveFixed | 0x80408000, + LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000, + STLXP_w = LoadStoreExclusiveFixed | 0x80208000, + STLXP_x = LoadStoreExclusiveFixed | 0xC0208000, + LDAXP_w = LoadStoreExclusiveFixed | 0x80608000, + LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000, + STLRB_w = LoadStoreExclusiveFixed | 0x00808000, + STLRH_w = LoadStoreExclusiveFixed | 0x40808000, + STLR_w = LoadStoreExclusiveFixed | 0x80808000, + STLR_x = LoadStoreExclusiveFixed | 0xC0808000, + LDARB_w = LoadStoreExclusiveFixed | 
0x00C08000, + LDARH_w = LoadStoreExclusiveFixed | 0x40C08000, + LDAR_w = LoadStoreExclusiveFixed | 0x80C08000, + LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000, + + // v8.1 Load/store LORegion ops + STLLRB = LoadStoreExclusiveFixed | 0x00800000, + LDLARB = LoadStoreExclusiveFixed | 0x00C00000, + STLLRH = LoadStoreExclusiveFixed | 0x40800000, + LDLARH = LoadStoreExclusiveFixed | 0x40C00000, + STLLR_w = LoadStoreExclusiveFixed | 0x80800000, + LDLAR_w = LoadStoreExclusiveFixed | 0x80C00000, + STLLR_x = LoadStoreExclusiveFixed | 0xC0800000, + LDLAR_x = LoadStoreExclusiveFixed | 0xC0C00000, + + // v8.1 Load/store exclusive ops + LSEBit_l = 0x00400000, + LSEBit_o0 = 0x00008000, + LSEBit_sz = 0x40000000, + CASFixed = LoadStoreExclusiveFixed | 0x80A00000, + CASBFixed = LoadStoreExclusiveFixed | 0x00A00000, + CASHFixed = LoadStoreExclusiveFixed | 0x40A00000, + CASPFixed = LoadStoreExclusiveFixed | 0x00200000, + CAS_w = CASFixed, + CAS_x = CASFixed | LSEBit_sz, + CASA_w = CASFixed | LSEBit_l, + CASA_x = CASFixed | LSEBit_l | LSEBit_sz, + CASL_w = CASFixed | LSEBit_o0, + CASL_x = CASFixed | LSEBit_o0 | LSEBit_sz, + CASAL_w = CASFixed | LSEBit_l | LSEBit_o0, + CASAL_x = CASFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz, + CASB = CASBFixed, + CASAB = CASBFixed | LSEBit_l, + CASLB = CASBFixed | LSEBit_o0, + CASALB = CASBFixed | LSEBit_l | LSEBit_o0, + CASH = CASHFixed, + CASAH = CASHFixed | LSEBit_l, + CASLH = CASHFixed | LSEBit_o0, + CASALH = CASHFixed | LSEBit_l | LSEBit_o0, + CASP_w = CASPFixed, + CASP_x = CASPFixed | LSEBit_sz, + CASPA_w = CASPFixed | LSEBit_l, + CASPA_x = CASPFixed | LSEBit_l | LSEBit_sz, + CASPL_w = CASPFixed | LSEBit_o0, + CASPL_x = CASPFixed | LSEBit_o0 | LSEBit_sz, + CASPAL_w = CASPFixed | LSEBit_l | LSEBit_o0, + CASPAL_x = CASPFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz +}; + +// Load/store RCpc unscaled offset. 
+enum LoadStoreRCpcUnscaledOffsetOp { + LoadStoreRCpcUnscaledOffsetFixed = 0x19000000, + LoadStoreRCpcUnscaledOffsetFMask = 0x3F200C00, + LoadStoreRCpcUnscaledOffsetMask = 0xFFE00C00, + STLURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00000000, + LDAPURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00400000, + LDAPURSB_x = LoadStoreRCpcUnscaledOffsetFixed | 0x00800000, + LDAPURSB_w = LoadStoreRCpcUnscaledOffsetFixed | 0x00C00000, + STLURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40000000, + LDAPURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40400000, + LDAPURSH_x = LoadStoreRCpcUnscaledOffsetFixed | 0x40800000, + LDAPURSH_w = LoadStoreRCpcUnscaledOffsetFixed | 0x40C00000, + STLUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80000000, + LDAPUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80400000, + LDAPURSW = LoadStoreRCpcUnscaledOffsetFixed | 0x80800000, + STLUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0000000, + LDAPUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0400000 +}; + +#define ATOMIC_MEMORY_SIMPLE_OPC_LIST(V) \ + V(LDADD, 0x00000000), \ + V(LDCLR, 0x00001000), \ + V(LDEOR, 0x00002000), \ + V(LDSET, 0x00003000), \ + V(LDSMAX, 0x00004000), \ + V(LDSMIN, 0x00005000), \ + V(LDUMAX, 0x00006000), \ + V(LDUMIN, 0x00007000) + +// Atomic memory. 
+enum AtomicMemoryOp { + AtomicMemoryFixed = 0x38200000, + AtomicMemoryFMask = 0x3B200C00, + AtomicMemoryMask = 0xFFE0FC00, + SWPB = AtomicMemoryFixed | 0x00008000, + SWPAB = AtomicMemoryFixed | 0x00808000, + SWPLB = AtomicMemoryFixed | 0x00408000, + SWPALB = AtomicMemoryFixed | 0x00C08000, + SWPH = AtomicMemoryFixed | 0x40008000, + SWPAH = AtomicMemoryFixed | 0x40808000, + SWPLH = AtomicMemoryFixed | 0x40408000, + SWPALH = AtomicMemoryFixed | 0x40C08000, + SWP_w = AtomicMemoryFixed | 0x80008000, + SWPA_w = AtomicMemoryFixed | 0x80808000, + SWPL_w = AtomicMemoryFixed | 0x80408000, + SWPAL_w = AtomicMemoryFixed | 0x80C08000, + SWP_x = AtomicMemoryFixed | 0xC0008000, + SWPA_x = AtomicMemoryFixed | 0xC0808000, + SWPL_x = AtomicMemoryFixed | 0xC0408000, + SWPAL_x = AtomicMemoryFixed | 0xC0C08000, + LDAPRB = AtomicMemoryFixed | 0x0080C000, + LDAPRH = AtomicMemoryFixed | 0x4080C000, + LDAPR_w = AtomicMemoryFixed | 0x8080C000, + LDAPR_x = AtomicMemoryFixed | 0xC080C000, + + AtomicMemorySimpleFMask = 0x3B208C00, + AtomicMemorySimpleOpMask = 0x00007000, +#define ATOMIC_MEMORY_SIMPLE(N, OP) \ + N##Op = OP, \ + N##B = AtomicMemoryFixed | OP, \ + N##AB = AtomicMemoryFixed | OP | 0x00800000, \ + N##LB = AtomicMemoryFixed | OP | 0x00400000, \ + N##ALB = AtomicMemoryFixed | OP | 0x00C00000, \ + N##H = AtomicMemoryFixed | OP | 0x40000000, \ + N##AH = AtomicMemoryFixed | OP | 0x40800000, \ + N##LH = AtomicMemoryFixed | OP | 0x40400000, \ + N##ALH = AtomicMemoryFixed | OP | 0x40C00000, \ + N##_w = AtomicMemoryFixed | OP | 0x80000000, \ + N##A_w = AtomicMemoryFixed | OP | 0x80800000, \ + N##L_w = AtomicMemoryFixed | OP | 0x80400000, \ + N##AL_w = AtomicMemoryFixed | OP | 0x80C00000, \ + N##_x = AtomicMemoryFixed | OP | 0xC0000000, \ + N##A_x = AtomicMemoryFixed | OP | 0xC0800000, \ + N##L_x = AtomicMemoryFixed | OP | 0xC0400000, \ + N##AL_x = AtomicMemoryFixed | OP | 0xC0C00000 + + ATOMIC_MEMORY_SIMPLE_OPC_LIST(ATOMIC_MEMORY_SIMPLE) +#undef ATOMIC_MEMORY_SIMPLE +}; + +// Conditional 
compare. +enum ConditionalCompareOp { + ConditionalCompareMask = 0x60000000, + CCMN = 0x20000000, + CCMP = 0x60000000 +}; + +// Conditional compare register. +enum ConditionalCompareRegisterOp { + ConditionalCompareRegisterFixed = 0x1A400000, + ConditionalCompareRegisterFMask = 0x1FE00800, + ConditionalCompareRegisterMask = 0xFFE00C10, + CCMN_w = ConditionalCompareRegisterFixed | CCMN, + CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN, + CCMP_w = ConditionalCompareRegisterFixed | CCMP, + CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP +}; + +// Conditional compare immediate. +enum ConditionalCompareImmediateOp { + ConditionalCompareImmediateFixed = 0x1A400800, + ConditionalCompareImmediateFMask = 0x1FE00800, + ConditionalCompareImmediateMask = 0xFFE00C10, + CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN, + CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN, + CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP, + CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP +}; + +// Conditional select. +enum ConditionalSelectOp { + ConditionalSelectFixed = 0x1A800000, + ConditionalSelectFMask = 0x1FE00000, + ConditionalSelectMask = 0xFFE00C00, + CSEL_w = ConditionalSelectFixed | 0x00000000, + CSEL_x = ConditionalSelectFixed | 0x80000000, + CSEL = CSEL_w, + CSINC_w = ConditionalSelectFixed | 0x00000400, + CSINC_x = ConditionalSelectFixed | 0x80000400, + CSINC = CSINC_w, + CSINV_w = ConditionalSelectFixed | 0x40000000, + CSINV_x = ConditionalSelectFixed | 0xC0000000, + CSINV = CSINV_w, + CSNEG_w = ConditionalSelectFixed | 0x40000400, + CSNEG_x = ConditionalSelectFixed | 0xC0000400, + CSNEG = CSNEG_w +}; + +// Data processing 1 source. 
+enum DataProcessing1SourceOp { + DataProcessing1SourceFixed = 0x5AC00000, + DataProcessing1SourceFMask = 0x5FE00000, + DataProcessing1SourceMask = 0xFFFFFC00, + RBIT = DataProcessing1SourceFixed | 0x00000000, + RBIT_w = RBIT, + RBIT_x = RBIT | SixtyFourBits, + REV16 = DataProcessing1SourceFixed | 0x00000400, + REV16_w = REV16, + REV16_x = REV16 | SixtyFourBits, + REV = DataProcessing1SourceFixed | 0x00000800, + REV_w = REV, + REV32_x = REV | SixtyFourBits, + REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00, + CLZ = DataProcessing1SourceFixed | 0x00001000, + CLZ_w = CLZ, + CLZ_x = CLZ | SixtyFourBits, + CLS = DataProcessing1SourceFixed | 0x00001400, + CLS_w = CLS, + CLS_x = CLS | SixtyFourBits, + + // Pointer authentication instructions in Armv8.3. + PACIA = DataProcessing1SourceFixed | 0x80010000, + PACIB = DataProcessing1SourceFixed | 0x80010400, + PACDA = DataProcessing1SourceFixed | 0x80010800, + PACDB = DataProcessing1SourceFixed | 0x80010C00, + AUTIA = DataProcessing1SourceFixed | 0x80011000, + AUTIB = DataProcessing1SourceFixed | 0x80011400, + AUTDA = DataProcessing1SourceFixed | 0x80011800, + AUTDB = DataProcessing1SourceFixed | 0x80011C00, + PACIZA = DataProcessing1SourceFixed | 0x80012000, + PACIZB = DataProcessing1SourceFixed | 0x80012400, + PACDZA = DataProcessing1SourceFixed | 0x80012800, + PACDZB = DataProcessing1SourceFixed | 0x80012C00, + AUTIZA = DataProcessing1SourceFixed | 0x80013000, + AUTIZB = DataProcessing1SourceFixed | 0x80013400, + AUTDZA = DataProcessing1SourceFixed | 0x80013800, + AUTDZB = DataProcessing1SourceFixed | 0x80013C00, + XPACI = DataProcessing1SourceFixed | 0x80014000, + XPACD = DataProcessing1SourceFixed | 0x80014400 +}; + +// Data processing 2 source. 
+enum DataProcessing2SourceOp { + DataProcessing2SourceFixed = 0x1AC00000, + DataProcessing2SourceFMask = 0x5FE00000, + DataProcessing2SourceMask = 0xFFE0FC00, + UDIV_w = DataProcessing2SourceFixed | 0x00000800, + UDIV_x = DataProcessing2SourceFixed | 0x80000800, + UDIV = UDIV_w, + SDIV_w = DataProcessing2SourceFixed | 0x00000C00, + SDIV_x = DataProcessing2SourceFixed | 0x80000C00, + SDIV = SDIV_w, + LSLV_w = DataProcessing2SourceFixed | 0x00002000, + LSLV_x = DataProcessing2SourceFixed | 0x80002000, + LSLV = LSLV_w, + LSRV_w = DataProcessing2SourceFixed | 0x00002400, + LSRV_x = DataProcessing2SourceFixed | 0x80002400, + LSRV = LSRV_w, + ASRV_w = DataProcessing2SourceFixed | 0x00002800, + ASRV_x = DataProcessing2SourceFixed | 0x80002800, + ASRV = ASRV_w, + RORV_w = DataProcessing2SourceFixed | 0x00002C00, + RORV_x = DataProcessing2SourceFixed | 0x80002C00, + RORV = RORV_w, + PACGA = DataProcessing2SourceFixed | SixtyFourBits | 0x00003000, + CRC32B = DataProcessing2SourceFixed | 0x00004000, + CRC32H = DataProcessing2SourceFixed | 0x00004400, + CRC32W = DataProcessing2SourceFixed | 0x00004800, + CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00, + CRC32CB = DataProcessing2SourceFixed | 0x00005000, + CRC32CH = DataProcessing2SourceFixed | 0x00005400, + CRC32CW = DataProcessing2SourceFixed | 0x00005800, + CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00 +}; + +// Data processing 3 source. 
+enum DataProcessing3SourceOp { + DataProcessing3SourceFixed = 0x1B000000, + DataProcessing3SourceFMask = 0x1F000000, + DataProcessing3SourceMask = 0xFFE08000, + MADD_w = DataProcessing3SourceFixed | 0x00000000, + MADD_x = DataProcessing3SourceFixed | 0x80000000, + MADD = MADD_w, + MSUB_w = DataProcessing3SourceFixed | 0x00008000, + MSUB_x = DataProcessing3SourceFixed | 0x80008000, + MSUB = MSUB_w, + SMADDL_x = DataProcessing3SourceFixed | 0x80200000, + SMSUBL_x = DataProcessing3SourceFixed | 0x80208000, + SMULH_x = DataProcessing3SourceFixed | 0x80400000, + UMADDL_x = DataProcessing3SourceFixed | 0x80A00000, + UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000, + UMULH_x = DataProcessing3SourceFixed | 0x80C00000 +}; + +// Floating point compare. +enum FPCompareOp { + FPCompareFixed = 0x1E202000, + FPCompareFMask = 0x5F203C00, + FPCompareMask = 0xFFE0FC1F, + FCMP_h = FPCompareFixed | FP16 | 0x00000000, + FCMP_s = FPCompareFixed | 0x00000000, + FCMP_d = FPCompareFixed | FP64 | 0x00000000, + FCMP = FCMP_s, + FCMP_h_zero = FPCompareFixed | FP16 | 0x00000008, + FCMP_s_zero = FPCompareFixed | 0x00000008, + FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008, + FCMP_zero = FCMP_s_zero, + FCMPE_h = FPCompareFixed | FP16 | 0x00000010, + FCMPE_s = FPCompareFixed | 0x00000010, + FCMPE_d = FPCompareFixed | FP64 | 0x00000010, + FCMPE = FCMPE_s, + FCMPE_h_zero = FPCompareFixed | FP16 | 0x00000018, + FCMPE_s_zero = FPCompareFixed | 0x00000018, + FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018, + FCMPE_zero = FCMPE_s_zero +}; + +// Floating point conditional compare. 
+enum FPConditionalCompareOp { + FPConditionalCompareFixed = 0x1E200400, + FPConditionalCompareFMask = 0x5F200C00, + FPConditionalCompareMask = 0xFFE00C10, + FCCMP_h = FPConditionalCompareFixed | FP16 | 0x00000000, + FCCMP_s = FPConditionalCompareFixed | 0x00000000, + FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000, + FCCMP = FCCMP_s, + FCCMPE_h = FPConditionalCompareFixed | FP16 | 0x00000010, + FCCMPE_s = FPConditionalCompareFixed | 0x00000010, + FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010, + FCCMPE = FCCMPE_s +}; + +// Floating point conditional select. +enum FPConditionalSelectOp { + FPConditionalSelectFixed = 0x1E200C00, + FPConditionalSelectFMask = 0x5F200C00, + FPConditionalSelectMask = 0xFFE00C00, + FCSEL_h = FPConditionalSelectFixed | FP16 | 0x00000000, + FCSEL_s = FPConditionalSelectFixed | 0x00000000, + FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000, + FCSEL = FCSEL_s +}; + +// Floating point immediate. +enum FPImmediateOp { + FPImmediateFixed = 0x1E201000, + FPImmediateFMask = 0x5F201C00, + FPImmediateMask = 0xFFE01C00, + FMOV_h_imm = FPImmediateFixed | FP16 | 0x00000000, + FMOV_s_imm = FPImmediateFixed | 0x00000000, + FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000 +}; + +// Floating point data processing 1 source. 
+enum FPDataProcessing1SourceOp { + FPDataProcessing1SourceFixed = 0x1E204000, + FPDataProcessing1SourceFMask = 0x5F207C00, + FPDataProcessing1SourceMask = 0xFFFFFC00, + FMOV_h = FPDataProcessing1SourceFixed | FP16 | 0x00000000, + FMOV_s = FPDataProcessing1SourceFixed | 0x00000000, + FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000, + FMOV = FMOV_s, + FABS_h = FPDataProcessing1SourceFixed | FP16 | 0x00008000, + FABS_s = FPDataProcessing1SourceFixed | 0x00008000, + FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000, + FABS = FABS_s, + FNEG_h = FPDataProcessing1SourceFixed | FP16 | 0x00010000, + FNEG_s = FPDataProcessing1SourceFixed | 0x00010000, + FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000, + FNEG = FNEG_s, + FSQRT_h = FPDataProcessing1SourceFixed | FP16 | 0x00018000, + FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000, + FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000, + FSQRT = FSQRT_s, + FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000, + FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000, + FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000, + FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000, + FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000, + FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000, + FRINTN_h = FPDataProcessing1SourceFixed | FP16 | 0x00040000, + FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000, + FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000, + FRINTN = FRINTN_s, + FRINTP_h = FPDataProcessing1SourceFixed | FP16 | 0x00048000, + FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000, + FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000, + FRINTP = FRINTP_s, + FRINTM_h = FPDataProcessing1SourceFixed | FP16 | 0x00050000, + FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000, + FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000, + FRINTM = FRINTM_s, + FRINTZ_h = FPDataProcessing1SourceFixed | FP16 | 0x00058000, + FRINTZ_s = 
FPDataProcessing1SourceFixed | 0x00058000, + FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000, + FRINTZ = FRINTZ_s, + FRINTA_h = FPDataProcessing1SourceFixed | FP16 | 0x00060000, + FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000, + FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000, + FRINTA = FRINTA_s, + FRINTX_h = FPDataProcessing1SourceFixed | FP16 | 0x00070000, + FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000, + FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000, + FRINTX = FRINTX_s, + FRINTI_h = FPDataProcessing1SourceFixed | FP16 | 0x00078000, + FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000, + FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000, + FRINTI = FRINTI_s +}; + +// Floating point data processing 2 source. +enum FPDataProcessing2SourceOp { + FPDataProcessing2SourceFixed = 0x1E200800, + FPDataProcessing2SourceFMask = 0x5F200C00, + FPDataProcessing2SourceMask = 0xFFE0FC00, + FMUL = FPDataProcessing2SourceFixed | 0x00000000, + FMUL_h = FMUL | FP16, + FMUL_s = FMUL, + FMUL_d = FMUL | FP64, + FDIV = FPDataProcessing2SourceFixed | 0x00001000, + FDIV_h = FDIV | FP16, + FDIV_s = FDIV, + FDIV_d = FDIV | FP64, + FADD = FPDataProcessing2SourceFixed | 0x00002000, + FADD_h = FADD | FP16, + FADD_s = FADD, + FADD_d = FADD | FP64, + FSUB = FPDataProcessing2SourceFixed | 0x00003000, + FSUB_h = FSUB | FP16, + FSUB_s = FSUB, + FSUB_d = FSUB | FP64, + FMAX = FPDataProcessing2SourceFixed | 0x00004000, + FMAX_h = FMAX | FP16, + FMAX_s = FMAX, + FMAX_d = FMAX | FP64, + FMIN = FPDataProcessing2SourceFixed | 0x00005000, + FMIN_h = FMIN | FP16, + FMIN_s = FMIN, + FMIN_d = FMIN | FP64, + FMAXNM = FPDataProcessing2SourceFixed | 0x00006000, + FMAXNM_h = FMAXNM | FP16, + FMAXNM_s = FMAXNM, + FMAXNM_d = FMAXNM | FP64, + FMINNM = FPDataProcessing2SourceFixed | 0x00007000, + FMINNM_h = FMINNM | FP16, + FMINNM_s = FMINNM, + FMINNM_d = FMINNM | FP64, + FNMUL = FPDataProcessing2SourceFixed | 0x00008000, + FNMUL_h = FNMUL | FP16, + 
FNMUL_s = FNMUL, + FNMUL_d = FNMUL | FP64 +}; + +// Floating point data processing 3 source. +enum FPDataProcessing3SourceOp { + FPDataProcessing3SourceFixed = 0x1F000000, + FPDataProcessing3SourceFMask = 0x5F000000, + FPDataProcessing3SourceMask = 0xFFE08000, + FMADD_h = FPDataProcessing3SourceFixed | 0x00C00000, + FMSUB_h = FPDataProcessing3SourceFixed | 0x00C08000, + FNMADD_h = FPDataProcessing3SourceFixed | 0x00E00000, + FNMSUB_h = FPDataProcessing3SourceFixed | 0x00E08000, + FMADD_s = FPDataProcessing3SourceFixed | 0x00000000, + FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000, + FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000, + FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000, + FMADD_d = FPDataProcessing3SourceFixed | 0x00400000, + FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000, + FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000, + FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000 +}; + +// Conversion between floating point and integer. +enum FPIntegerConvertOp { + FPIntegerConvertFixed = 0x1E200000, + FPIntegerConvertFMask = 0x5F20FC00, + FPIntegerConvertMask = 0xFFFFFC00, + FCVTNS = FPIntegerConvertFixed | 0x00000000, + FCVTNS_wh = FCVTNS | FP16, + FCVTNS_xh = FCVTNS | SixtyFourBits | FP16, + FCVTNS_ws = FCVTNS, + FCVTNS_xs = FCVTNS | SixtyFourBits, + FCVTNS_wd = FCVTNS | FP64, + FCVTNS_xd = FCVTNS | SixtyFourBits | FP64, + FCVTNU = FPIntegerConvertFixed | 0x00010000, + FCVTNU_wh = FCVTNU | FP16, + FCVTNU_xh = FCVTNU | SixtyFourBits | FP16, + FCVTNU_ws = FCVTNU, + FCVTNU_xs = FCVTNU | SixtyFourBits, + FCVTNU_wd = FCVTNU | FP64, + FCVTNU_xd = FCVTNU | SixtyFourBits | FP64, + FCVTPS = FPIntegerConvertFixed | 0x00080000, + FCVTPS_wh = FCVTPS | FP16, + FCVTPS_xh = FCVTPS | SixtyFourBits | FP16, + FCVTPS_ws = FCVTPS, + FCVTPS_xs = FCVTPS | SixtyFourBits, + FCVTPS_wd = FCVTPS | FP64, + FCVTPS_xd = FCVTPS | SixtyFourBits | FP64, + FCVTPU = FPIntegerConvertFixed | 0x00090000, + FCVTPU_wh = FCVTPU | FP16, + FCVTPU_xh = FCVTPU | 
SixtyFourBits | FP16, + FCVTPU_ws = FCVTPU, + FCVTPU_xs = FCVTPU | SixtyFourBits, + FCVTPU_wd = FCVTPU | FP64, + FCVTPU_xd = FCVTPU | SixtyFourBits | FP64, + FCVTMS = FPIntegerConvertFixed | 0x00100000, + FCVTMS_wh = FCVTMS | FP16, + FCVTMS_xh = FCVTMS | SixtyFourBits | FP16, + FCVTMS_ws = FCVTMS, + FCVTMS_xs = FCVTMS | SixtyFourBits, + FCVTMS_wd = FCVTMS | FP64, + FCVTMS_xd = FCVTMS | SixtyFourBits | FP64, + FCVTMU = FPIntegerConvertFixed | 0x00110000, + FCVTMU_wh = FCVTMU | FP16, + FCVTMU_xh = FCVTMU | SixtyFourBits | FP16, + FCVTMU_ws = FCVTMU, + FCVTMU_xs = FCVTMU | SixtyFourBits, + FCVTMU_wd = FCVTMU | FP64, + FCVTMU_xd = FCVTMU | SixtyFourBits | FP64, + FCVTZS = FPIntegerConvertFixed | 0x00180000, + FCVTZS_wh = FCVTZS | FP16, + FCVTZS_xh = FCVTZS | SixtyFourBits | FP16, + FCVTZS_ws = FCVTZS, + FCVTZS_xs = FCVTZS | SixtyFourBits, + FCVTZS_wd = FCVTZS | FP64, + FCVTZS_xd = FCVTZS | SixtyFourBits | FP64, + FCVTZU = FPIntegerConvertFixed | 0x00190000, + FCVTZU_wh = FCVTZU | FP16, + FCVTZU_xh = FCVTZU | SixtyFourBits | FP16, + FCVTZU_ws = FCVTZU, + FCVTZU_xs = FCVTZU | SixtyFourBits, + FCVTZU_wd = FCVTZU | FP64, + FCVTZU_xd = FCVTZU | SixtyFourBits | FP64, + SCVTF = FPIntegerConvertFixed | 0x00020000, + SCVTF_hw = SCVTF | FP16, + SCVTF_hx = SCVTF | SixtyFourBits | FP16, + SCVTF_sw = SCVTF, + SCVTF_sx = SCVTF | SixtyFourBits, + SCVTF_dw = SCVTF | FP64, + SCVTF_dx = SCVTF | SixtyFourBits | FP64, + UCVTF = FPIntegerConvertFixed | 0x00030000, + UCVTF_hw = UCVTF | FP16, + UCVTF_hx = UCVTF | SixtyFourBits | FP16, + UCVTF_sw = UCVTF, + UCVTF_sx = UCVTF | SixtyFourBits, + UCVTF_dw = UCVTF | FP64, + UCVTF_dx = UCVTF | SixtyFourBits | FP64, + FCVTAS = FPIntegerConvertFixed | 0x00040000, + FCVTAS_wh = FCVTAS | FP16, + FCVTAS_xh = FCVTAS | SixtyFourBits | FP16, + FCVTAS_ws = FCVTAS, + FCVTAS_xs = FCVTAS | SixtyFourBits, + FCVTAS_wd = FCVTAS | FP64, + FCVTAS_xd = FCVTAS | SixtyFourBits | FP64, + FCVTAU = FPIntegerConvertFixed | 0x00050000, + FCVTAU_wh = FCVTAU | FP16, + 
FCVTAU_xh = FCVTAU | SixtyFourBits | FP16, + FCVTAU_ws = FCVTAU, + FCVTAU_xs = FCVTAU | SixtyFourBits, + FCVTAU_wd = FCVTAU | FP64, + FCVTAU_xd = FCVTAU | SixtyFourBits | FP64, + FMOV_wh = FPIntegerConvertFixed | 0x00060000 | FP16, + FMOV_hw = FPIntegerConvertFixed | 0x00070000 | FP16, + FMOV_xh = FMOV_wh | SixtyFourBits, + FMOV_hx = FMOV_hw | SixtyFourBits, + FMOV_ws = FPIntegerConvertFixed | 0x00060000, + FMOV_sw = FPIntegerConvertFixed | 0x00070000, + FMOV_xd = FMOV_ws | SixtyFourBits | FP64, + FMOV_dx = FMOV_sw | SixtyFourBits | FP64, + FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000, + FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000, + FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000 +}; + +// Conversion between fixed point and floating point. +enum FPFixedPointConvertOp { + FPFixedPointConvertFixed = 0x1E000000, + FPFixedPointConvertFMask = 0x5F200000, + FPFixedPointConvertMask = 0xFFFF0000, + FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000, + FCVTZS_wh_fixed = FCVTZS_fixed | FP16, + FCVTZS_xh_fixed = FCVTZS_fixed | SixtyFourBits | FP16, + FCVTZS_ws_fixed = FCVTZS_fixed, + FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits, + FCVTZS_wd_fixed = FCVTZS_fixed | FP64, + FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64, + FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000, + FCVTZU_wh_fixed = FCVTZU_fixed | FP16, + FCVTZU_xh_fixed = FCVTZU_fixed | SixtyFourBits | FP16, + FCVTZU_ws_fixed = FCVTZU_fixed, + FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits, + FCVTZU_wd_fixed = FCVTZU_fixed | FP64, + FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64, + SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000, + SCVTF_hw_fixed = SCVTF_fixed | FP16, + SCVTF_hx_fixed = SCVTF_fixed | SixtyFourBits | FP16, + SCVTF_sw_fixed = SCVTF_fixed, + SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits, + SCVTF_dw_fixed = SCVTF_fixed | FP64, + SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64, + UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000, + 
UCVTF_hw_fixed = UCVTF_fixed | FP16, + UCVTF_hx_fixed = UCVTF_fixed | SixtyFourBits | FP16, + UCVTF_sw_fixed = UCVTF_fixed, + UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits, + UCVTF_dw_fixed = UCVTF_fixed | FP64, + UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64 +}; + +// Crypto - two register SHA. +enum Crypto2RegSHAOp { + Crypto2RegSHAFixed = 0x5E280800, + Crypto2RegSHAFMask = 0xFF3E0C00 +}; + +// Crypto - three register SHA. +enum Crypto3RegSHAOp { + Crypto3RegSHAFixed = 0x5E000000, + Crypto3RegSHAFMask = 0xFF208C00 +}; + +// Crypto - AES. +enum CryptoAESOp { + CryptoAESFixed = 0x4E280800, + CryptoAESFMask = 0xFF3E0C00 +}; + +// NEON instructions with two register operands. +enum NEON2RegMiscOp { + NEON2RegMiscFixed = 0x0E200800, + NEON2RegMiscFMask = 0x9F3E0C00, + NEON2RegMiscMask = 0xBF3FFC00, + NEON2RegMiscUBit = 0x20000000, + NEON_REV64 = NEON2RegMiscFixed | 0x00000000, + NEON_REV32 = NEON2RegMiscFixed | 0x20000000, + NEON_REV16 = NEON2RegMiscFixed | 0x00001000, + NEON_SADDLP = NEON2RegMiscFixed | 0x00002000, + NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit, + NEON_SUQADD = NEON2RegMiscFixed | 0x00003000, + NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit, + NEON_CLS = NEON2RegMiscFixed | 0x00004000, + NEON_CLZ = NEON2RegMiscFixed | 0x20004000, + NEON_CNT = NEON2RegMiscFixed | 0x00005000, + NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000, + NEON_SADALP = NEON2RegMiscFixed | 0x00006000, + NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit, + NEON_SQABS = NEON2RegMiscFixed | 0x00007000, + NEON_SQNEG = NEON2RegMiscFixed | 0x20007000, + NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000, + NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000, + NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000, + NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000, + NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000, + NEON_ABS = NEON2RegMiscFixed | 0x0000B000, + NEON_NEG = NEON2RegMiscFixed | 0x2000B000, + NEON_XTN = NEON2RegMiscFixed | 0x00012000, + NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000, + 
NEON_SHLL = NEON2RegMiscFixed | 0x20013000, + NEON_SQXTN = NEON2RegMiscFixed | 0x00014000, + NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit, + + NEON2RegMiscOpcode = 0x0001F000, + NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode, + NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode, + NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode, + NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode, + + // These instructions use only one bit of the size field. The other bit is + // used to distinguish between instructions. + NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000, + NEON_FABS = NEON2RegMiscFixed | 0x0080F000, + NEON_FNEG = NEON2RegMiscFixed | 0x2080F000, + NEON_FCVTN = NEON2RegMiscFixed | 0x00016000, + NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000, + NEON_FCVTL = NEON2RegMiscFixed | 0x00017000, + NEON_FRINTN = NEON2RegMiscFixed | 0x00018000, + NEON_FRINTA = NEON2RegMiscFixed | 0x20018000, + NEON_FRINTP = NEON2RegMiscFixed | 0x00818000, + NEON_FRINTM = NEON2RegMiscFixed | 0x00019000, + NEON_FRINTX = NEON2RegMiscFixed | 0x20019000, + NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000, + NEON_FRINTI = NEON2RegMiscFixed | 0x20819000, + NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000, + NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit, + NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000, + NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit, + NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000, + NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit, + NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000, + NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit, + NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000, + NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit, + NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000, + NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000, + NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit, + NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000, + NEON_URECPE = NEON2RegMiscFixed | 0x0081C000, + NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000, + NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000, + NEON_FCMGT_zero = NEON2RegMiscFixed | 
0x0080C000, + NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000, + NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000, + NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000, + NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000, + + NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode, + NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode +}; + +// NEON instructions with two register operands (FP16). +enum NEON2RegMiscFP16Op { + NEON2RegMiscFP16Fixed = 0x0E780800, + NEON2RegMiscFP16FMask = 0x9F7E0C00, + NEON2RegMiscFP16Mask = 0xBFFFFC00, + NEON_FRINTN_H = NEON2RegMiscFP16Fixed | 0x00018000, + NEON_FRINTM_H = NEON2RegMiscFP16Fixed | 0x00019000, + NEON_FCVTNS_H = NEON2RegMiscFP16Fixed | 0x0001A000, + NEON_FCVTMS_H = NEON2RegMiscFP16Fixed | 0x0001B000, + NEON_FCVTAS_H = NEON2RegMiscFP16Fixed | 0x0001C000, + NEON_SCVTF_H = NEON2RegMiscFP16Fixed | 0x0001D000, + NEON_FCMGT_H_zero = NEON2RegMiscFP16Fixed | 0x0080C000, + NEON_FCMEQ_H_zero = NEON2RegMiscFP16Fixed | 0x0080D000, + NEON_FCMLT_H_zero = NEON2RegMiscFP16Fixed | 0x0080E000, + NEON_FABS_H = NEON2RegMiscFP16Fixed | 0x0080F000, + NEON_FRINTP_H = NEON2RegMiscFP16Fixed | 0x00818000, + NEON_FRINTZ_H = NEON2RegMiscFP16Fixed | 0x00819000, + NEON_FCVTPS_H = NEON2RegMiscFP16Fixed | 0x0081A000, + NEON_FCVTZS_H = NEON2RegMiscFP16Fixed | 0x0081B000, + NEON_FRECPE_H = NEON2RegMiscFP16Fixed | 0x0081D000, + NEON_FRINTA_H = NEON2RegMiscFP16Fixed | 0x20018000, + NEON_FRINTX_H = NEON2RegMiscFP16Fixed | 0x20019000, + NEON_FCVTNU_H = NEON2RegMiscFP16Fixed | 0x2001A000, + NEON_FCVTMU_H = NEON2RegMiscFP16Fixed | 0x2001B000, + NEON_FCVTAU_H = NEON2RegMiscFP16Fixed | 0x2001C000, + NEON_UCVTF_H = NEON2RegMiscFP16Fixed | 0x2001D000, + NEON_FCMGE_H_zero = NEON2RegMiscFP16Fixed | 0x2080C000, + NEON_FCMLE_H_zero = NEON2RegMiscFP16Fixed | 0x2080D000, + NEON_FNEG_H = NEON2RegMiscFP16Fixed | 0x2080F000, + NEON_FRINTI_H = NEON2RegMiscFP16Fixed | 0x20819000, + NEON_FCVTPU_H = NEON2RegMiscFP16Fixed | 0x2081A000, + NEON_FCVTZU_H = NEON2RegMiscFP16Fixed | 
0x2081B000, + NEON_FRSQRTE_H = NEON2RegMiscFP16Fixed | 0x2081D000, + NEON_FSQRT_H = NEON2RegMiscFP16Fixed | 0x2081F000 +}; + +// NEON instructions with three same-type operands. +enum NEON3SameOp { + NEON3SameFixed = 0x0E200400, + NEON3SameFMask = 0x9F200400, + NEON3SameMask = 0xBF20FC00, + NEON3SameUBit = 0x20000000, + NEON_ADD = NEON3SameFixed | 0x00008000, + NEON_ADDP = NEON3SameFixed | 0x0000B800, + NEON_SHADD = NEON3SameFixed | 0x00000000, + NEON_SHSUB = NEON3SameFixed | 0x00002000, + NEON_SRHADD = NEON3SameFixed | 0x00001000, + NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800, + NEON_CMGE = NEON3SameFixed | 0x00003800, + NEON_CMGT = NEON3SameFixed | 0x00003000, + NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT, + NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE, + NEON_CMTST = NEON3SameFixed | 0x00008800, + NEON_MLA = NEON3SameFixed | 0x00009000, + NEON_MLS = NEON3SameFixed | 0x20009000, + NEON_MUL = NEON3SameFixed | 0x00009800, + NEON_PMUL = NEON3SameFixed | 0x20009800, + NEON_SRSHL = NEON3SameFixed | 0x00005000, + NEON_SQSHL = NEON3SameFixed | 0x00004800, + NEON_SQRSHL = NEON3SameFixed | 0x00005800, + NEON_SSHL = NEON3SameFixed | 0x00004000, + NEON_SMAX = NEON3SameFixed | 0x00006000, + NEON_SMAXP = NEON3SameFixed | 0x0000A000, + NEON_SMIN = NEON3SameFixed | 0x00006800, + NEON_SMINP = NEON3SameFixed | 0x0000A800, + NEON_SABD = NEON3SameFixed | 0x00007000, + NEON_SABA = NEON3SameFixed | 0x00007800, + NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD, + NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA, + NEON_SQADD = NEON3SameFixed | 0x00000800, + NEON_SQSUB = NEON3SameFixed | 0x00002800, + NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000, + NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD, + NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB, + NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD, + NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX, + NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | 
NEON_SMAXP, + NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN, + NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP, + NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL, + NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD, + NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL, + NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL, + NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB, + NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL, + NEON_SQDMULH = NEON3SameFixed | 0x0000B000, + NEON_SQRDMULH = NEON3SameFixed | 0x2000B000, + + // NEON floating point instructions with three same-type operands. + NEON3SameFPFixed = NEON3SameFixed | 0x0000C000, + NEON3SameFPFMask = NEON3SameFMask | 0x0000C000, + NEON3SameFPMask = NEON3SameMask | 0x00800000, + NEON_FADD = NEON3SameFixed | 0x0000D000, + NEON_FSUB = NEON3SameFixed | 0x0080D000, + NEON_FMUL = NEON3SameFixed | 0x2000D800, + NEON_FDIV = NEON3SameFixed | 0x2000F800, + NEON_FMAX = NEON3SameFixed | 0x0000F000, + NEON_FMAXNM = NEON3SameFixed | 0x0000C000, + NEON_FMAXP = NEON3SameFixed | 0x2000F000, + NEON_FMAXNMP = NEON3SameFixed | 0x2000C000, + NEON_FMIN = NEON3SameFixed | 0x0080F000, + NEON_FMINNM = NEON3SameFixed | 0x0080C000, + NEON_FMINP = NEON3SameFixed | 0x2080F000, + NEON_FMINNMP = NEON3SameFixed | 0x2080C000, + NEON_FMLA = NEON3SameFixed | 0x0000C800, + NEON_FMLS = NEON3SameFixed | 0x0080C800, + NEON_FMULX = NEON3SameFixed | 0x0000D800, + NEON_FRECPS = NEON3SameFixed | 0x0000F800, + NEON_FRSQRTS = NEON3SameFixed | 0x0080F800, + NEON_FABD = NEON3SameFixed | 0x2080D000, + NEON_FADDP = NEON3SameFixed | 0x2000D000, + NEON_FCMEQ = NEON3SameFixed | 0x0000E000, + NEON_FCMGE = NEON3SameFixed | 0x2000E000, + NEON_FCMGT = NEON3SameFixed | 0x2080E000, + NEON_FACGE = NEON3SameFixed | 0x2000E800, + NEON_FACGT = NEON3SameFixed | 0x2080E800, + + // NEON logical instructions with three same-type operands. 
+ NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800, + NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800, + NEON3SameLogicalMask = 0xBFE0FC00, + NEON3SameLogicalFormatMask = NEON_Q, + NEON_AND = NEON3SameLogicalFixed | 0x00000000, + NEON_ORR = NEON3SameLogicalFixed | 0x00A00000, + NEON_ORN = NEON3SameLogicalFixed | 0x00C00000, + NEON_EOR = NEON3SameLogicalFixed | 0x20000000, + NEON_BIC = NEON3SameLogicalFixed | 0x00400000, + NEON_BIF = NEON3SameLogicalFixed | 0x20C00000, + NEON_BIT = NEON3SameLogicalFixed | 0x20800000, + NEON_BSL = NEON3SameLogicalFixed | 0x20400000, + + // FHM (FMLAL-like) instructions have an oddball encoding scheme under 3Same. + NEON3SameFHMMask = 0xBFE0FC00, // U size opcode + NEON_FMLAL = NEON3SameFixed | 0x0000E800, // 0 00 11101 + NEON_FMLAL2 = NEON3SameFixed | 0x2000C800, // 1 00 11001 + NEON_FMLSL = NEON3SameFixed | 0x0080E800, // 0 10 11101 + NEON_FMLSL2 = NEON3SameFixed | 0x2080C800 // 1 10 11001 +}; + + +enum NEON3SameFP16 { + NEON3SameFP16Fixed = 0x0E400400, + NEON3SameFP16FMask = 0x9F60C400, + NEON3SameFP16Mask = 0xBFE0FC00, + NEON_FMAXNM_H = NEON3SameFP16Fixed | 0x00000000, + NEON_FMLA_H = NEON3SameFP16Fixed | 0x00000800, + NEON_FADD_H = NEON3SameFP16Fixed | 0x00001000, + NEON_FMULX_H = NEON3SameFP16Fixed | 0x00001800, + NEON_FCMEQ_H = NEON3SameFP16Fixed | 0x00002000, + NEON_FMAX_H = NEON3SameFP16Fixed | 0x00003000, + NEON_FRECPS_H = NEON3SameFP16Fixed | 0x00003800, + NEON_FMINNM_H = NEON3SameFP16Fixed | 0x00800000, + NEON_FMLS_H = NEON3SameFP16Fixed | 0x00800800, + NEON_FSUB_H = NEON3SameFP16Fixed | 0x00801000, + NEON_FMIN_H = NEON3SameFP16Fixed | 0x00803000, + NEON_FRSQRTS_H = NEON3SameFP16Fixed | 0x00803800, + NEON_FMAXNMP_H = NEON3SameFP16Fixed | 0x20000000, + NEON_FADDP_H = NEON3SameFP16Fixed | 0x20001000, + NEON_FMUL_H = NEON3SameFP16Fixed | 0x20001800, + NEON_FCMGE_H = NEON3SameFP16Fixed | 0x20002000, + NEON_FACGE_H = NEON3SameFP16Fixed | 0x20002800, + NEON_FMAXP_H = NEON3SameFP16Fixed | 0x20003000, + NEON_FDIV_H = 
NEON3SameFP16Fixed | 0x20003800, + NEON_FMINNMP_H = NEON3SameFP16Fixed | 0x20800000, + NEON_FABD_H = NEON3SameFP16Fixed | 0x20801000, + NEON_FCMGT_H = NEON3SameFP16Fixed | 0x20802000, + NEON_FACGT_H = NEON3SameFP16Fixed | 0x20802800, + NEON_FMINP_H = NEON3SameFP16Fixed | 0x20803000 +}; + + +// 'Extra' NEON instructions with three same-type operands. +enum NEON3SameExtraOp { + NEON3SameExtraFixed = 0x0E008400, + NEON3SameExtraUBit = 0x20000000, + NEON3SameExtraFMask = 0x9E208400, + NEON3SameExtraMask = 0xBE20FC00, + NEON_SQRDMLAH = NEON3SameExtraFixed | NEON3SameExtraUBit, + NEON_SQRDMLSH = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00000800, + NEON_SDOT = NEON3SameExtraFixed | 0x00001000, + NEON_UDOT = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00001000, + + /* v8.3 Complex Numbers */ + NEON3SameExtraFCFixed = 0x2E00C400, + NEON3SameExtraFCFMask = 0xBF20C400, + // FCMLA fixes opcode<3:2>, and uses opcode<1:0> to encode . + NEON3SameExtraFCMLAMask = NEON3SameExtraFCFMask | 0x00006000, + NEON_FCMLA = NEON3SameExtraFCFixed, + // FCADD fixes opcode<3:2, 0>, and uses opcode<1> to encode . + NEON3SameExtraFCADDMask = NEON3SameExtraFCFMask | 0x00006800, + NEON_FCADD = NEON3SameExtraFCFixed | 0x00002000 + // Other encodings under NEON3SameExtraFCFMask are UNALLOCATED. +}; + +// NEON instructions with three different-type operands. 
+enum NEON3DifferentOp { + NEON3DifferentFixed = 0x0E200000, + NEON3DifferentFMask = 0x9F200C00, + NEON3DifferentMask = 0xFF20FC00, + NEON_ADDHN = NEON3DifferentFixed | 0x00004000, + NEON_ADDHN2 = NEON_ADDHN | NEON_Q, + NEON_PMULL = NEON3DifferentFixed | 0x0000E000, + NEON_PMULL2 = NEON_PMULL | NEON_Q, + NEON_RADDHN = NEON3DifferentFixed | 0x20004000, + NEON_RADDHN2 = NEON_RADDHN | NEON_Q, + NEON_RSUBHN = NEON3DifferentFixed | 0x20006000, + NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q, + NEON_SABAL = NEON3DifferentFixed | 0x00005000, + NEON_SABAL2 = NEON_SABAL | NEON_Q, + NEON_SABDL = NEON3DifferentFixed | 0x00007000, + NEON_SABDL2 = NEON_SABDL | NEON_Q, + NEON_SADDL = NEON3DifferentFixed | 0x00000000, + NEON_SADDL2 = NEON_SADDL | NEON_Q, + NEON_SADDW = NEON3DifferentFixed | 0x00001000, + NEON_SADDW2 = NEON_SADDW | NEON_Q, + NEON_SMLAL = NEON3DifferentFixed | 0x00008000, + NEON_SMLAL2 = NEON_SMLAL | NEON_Q, + NEON_SMLSL = NEON3DifferentFixed | 0x0000A000, + NEON_SMLSL2 = NEON_SMLSL | NEON_Q, + NEON_SMULL = NEON3DifferentFixed | 0x0000C000, + NEON_SMULL2 = NEON_SMULL | NEON_Q, + NEON_SSUBL = NEON3DifferentFixed | 0x00002000, + NEON_SSUBL2 = NEON_SSUBL | NEON_Q, + NEON_SSUBW = NEON3DifferentFixed | 0x00003000, + NEON_SSUBW2 = NEON_SSUBW | NEON_Q, + NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000, + NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q, + NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000, + NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q, + NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000, + NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q, + NEON_SUBHN = NEON3DifferentFixed | 0x00006000, + NEON_SUBHN2 = NEON_SUBHN | NEON_Q, + NEON_UABAL = NEON_SABAL | NEON3SameUBit, + NEON_UABAL2 = NEON_UABAL | NEON_Q, + NEON_UABDL = NEON_SABDL | NEON3SameUBit, + NEON_UABDL2 = NEON_UABDL | NEON_Q, + NEON_UADDL = NEON_SADDL | NEON3SameUBit, + NEON_UADDL2 = NEON_UADDL | NEON_Q, + NEON_UADDW = NEON_SADDW | NEON3SameUBit, + NEON_UADDW2 = NEON_UADDW | NEON_Q, + NEON_UMLAL = NEON_SMLAL | NEON3SameUBit, + NEON_UMLAL2 = 
NEON_UMLAL | NEON_Q, + NEON_UMLSL = NEON_SMLSL | NEON3SameUBit, + NEON_UMLSL2 = NEON_UMLSL | NEON_Q, + NEON_UMULL = NEON_SMULL | NEON3SameUBit, + NEON_UMULL2 = NEON_UMULL | NEON_Q, + NEON_USUBL = NEON_SSUBL | NEON3SameUBit, + NEON_USUBL2 = NEON_USUBL | NEON_Q, + NEON_USUBW = NEON_SSUBW | NEON3SameUBit, + NEON_USUBW2 = NEON_USUBW | NEON_Q +}; + +// NEON instructions operating across vectors. +enum NEONAcrossLanesOp { + NEONAcrossLanesFixed = 0x0E300800, + NEONAcrossLanesFMask = 0x9F3E0C00, + NEONAcrossLanesMask = 0xBF3FFC00, + NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000, + NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000, + NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000, + NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000, + NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000, + NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000, + NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000, + + NEONAcrossLanesFP16Fixed = NEONAcrossLanesFixed | 0x0000C000, + NEONAcrossLanesFP16FMask = NEONAcrossLanesFMask | 0x2000C000, + NEONAcrossLanesFP16Mask = NEONAcrossLanesMask | 0x20800000, + NEON_FMAXNMV_H = NEONAcrossLanesFP16Fixed | 0x00000000, + NEON_FMAXV_H = NEONAcrossLanesFP16Fixed | 0x00003000, + NEON_FMINNMV_H = NEONAcrossLanesFP16Fixed | 0x00800000, + NEON_FMINV_H = NEONAcrossLanesFP16Fixed | 0x00803000, + + // NEON floating point across instructions. + NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x2000C000, + NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x2000C000, + NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x20800000, + + NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000, + NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000, + NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000, + NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000 +}; + +// NEON instructions with indexed element operand. 
+enum NEONByIndexedElementOp { + NEONByIndexedElementFixed = 0x0F000000, + NEONByIndexedElementFMask = 0x9F000400, + NEONByIndexedElementMask = 0xBF00F400, + NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000, + NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000, + NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000, + NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000, + NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000, + NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000, + NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000, + NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000, + NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000, + NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000, + NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000, + NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000, + NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000, + NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000, + NEON_SDOT_byelement = NEONByIndexedElementFixed | 0x0000E000, + NEON_SQRDMLAH_byelement = NEONByIndexedElementFixed | 0x2000D000, + NEON_UDOT_byelement = NEONByIndexedElementFixed | 0x2000E000, + NEON_SQRDMLSH_byelement = NEONByIndexedElementFixed | 0x2000F000, + + NEON_FMLA_H_byelement = NEONByIndexedElementFixed | 0x00001000, + NEON_FMLS_H_byelement = NEONByIndexedElementFixed | 0x00005000, + NEON_FMUL_H_byelement = NEONByIndexedElementFixed | 0x00009000, + NEON_FMULX_H_byelement = NEONByIndexedElementFixed | 0x20009000, + + // Floating point instructions. 
+ NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000, + NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000, + NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000, + NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000, + NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000, + NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000, + + // FMLAL-like instructions. + // For all cases: U = x, size = 10, opcode = xx00 + NEONByIndexedElementFPLongFixed = NEONByIndexedElementFixed | 0x00800000, + NEONByIndexedElementFPLongFMask = NEONByIndexedElementFMask | 0x00C03000, + NEONByIndexedElementFPLongMask = 0xBFC0F400, + NEON_FMLAL_H_byelement = NEONByIndexedElementFixed | 0x00800000, + NEON_FMLAL2_H_byelement = NEONByIndexedElementFixed | 0x20808000, + NEON_FMLSL_H_byelement = NEONByIndexedElementFixed | 0x00804000, + NEON_FMLSL2_H_byelement = NEONByIndexedElementFixed | 0x2080C000, + + // Complex instruction(s). + // This is necessary because the 'rot' encoding moves into the + // NEONByIndex..Mask space. + NEONByIndexedElementFPComplexMask = 0xBF009400, + NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000 +}; + +// NEON register copy. +enum NEONCopyOp { + NEONCopyFixed = 0x0E000400, + NEONCopyFMask = 0x9FE08400, + NEONCopyMask = 0x3FE08400, + NEONCopyInsElementMask = NEONCopyMask | 0x40000000, + NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800, + NEONCopyDupElementMask = NEONCopyMask | 0x20007800, + NEONCopyDupGeneralMask = NEONCopyDupElementMask, + NEONCopyUmovMask = NEONCopyMask | 0x20007800, + NEONCopySmovMask = NEONCopyMask | 0x20007800, + NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000, + NEON_INS_GENERAL = NEONCopyFixed | 0x40001800, + NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000, + NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800, + NEON_SMOV = NEONCopyFixed | 0x00002800, + NEON_UMOV = NEONCopyFixed | 0x00003800 +}; + +// NEON extract. 
+enum NEONExtractOp { + NEONExtractFixed = 0x2E000000, + NEONExtractFMask = 0xBF208400, + NEONExtractMask = 0xBFE08400, + NEON_EXT = NEONExtractFixed | 0x00000000 +}; + +enum NEONLoadStoreMultiOp { + NEONLoadStoreMultiL = 0x00400000, + NEONLoadStoreMulti1_1v = 0x00007000, + NEONLoadStoreMulti1_2v = 0x0000A000, + NEONLoadStoreMulti1_3v = 0x00006000, + NEONLoadStoreMulti1_4v = 0x00002000, + NEONLoadStoreMulti2 = 0x00008000, + NEONLoadStoreMulti3 = 0x00004000, + NEONLoadStoreMulti4 = 0x00000000 +}; + +// NEON load/store multiple structures. +enum NEONLoadStoreMultiStructOp { + NEONLoadStoreMultiStructFixed = 0x0C000000, + NEONLoadStoreMultiStructFMask = 0xBFBF0000, + NEONLoadStoreMultiStructMask = 0xBFFFF000, + NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed, + NEONLoadStoreMultiStructLoad = NEONLoadStoreMultiStructFixed | + NEONLoadStoreMultiL, + NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v, + NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v, + NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v, + NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v, + NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2, + NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3, + NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4, + NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v, + NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v, + NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v, + NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v, + NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2, + NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3, + NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4 +}; + +// NEON load/store multiple structures with post-index addressing. 
+enum NEONLoadStoreMultiStructPostIndexOp { + NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000, + NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000, + NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000, + NEONLoadStoreMultiStructPostIndex = 0x00800000, + NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex, + NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex, + NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex, + NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex, + NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex, + NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex, + NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex +}; + +enum NEONLoadStoreSingleOp { + NEONLoadStoreSingle1 = 0x00000000, + NEONLoadStoreSingle2 = 0x00200000, + NEONLoadStoreSingle3 = 0x00002000, + NEONLoadStoreSingle4 = 0x00202000, + NEONLoadStoreSingleL = 0x00400000, + NEONLoadStoreSingle_b = 0x00000000, + NEONLoadStoreSingle_h = 0x00004000, + NEONLoadStoreSingle_s = 0x00008000, + NEONLoadStoreSingle_d = 0x00008400, + NEONLoadStoreSingleAllLanes = 0x0000C000, + NEONLoadStoreSingleLenMask = 0x00202000 +}; + +// NEON load/store single structure. 
+enum NEONLoadStoreSingleStructOp { + NEONLoadStoreSingleStructFixed = 0x0D000000, + NEONLoadStoreSingleStructFMask = 0xBF9F0000, + NEONLoadStoreSingleStructMask = 0xBFFFE000, + NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructLoad = NEONLoadStoreSingleStructFixed | + NEONLoadStoreSingleL, + NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 | + NEONLoadStoreSingleStructLoad, + NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 | + NEONLoadStoreSingleStructFixed, + NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 | + NEONLoadStoreSingleStructFixed, + NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b, + NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h, + NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s, + NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d, + NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes, + NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b, + NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h, + NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s, + NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d, + + NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b, + NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h, + NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s, + NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | 
NEONLoadStoreSingle_d, + NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes, + NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b, + NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h, + NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s, + NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d, + + NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b, + NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h, + NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s, + NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d, + NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes, + NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b, + NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h, + NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s, + NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d, + + NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b, + NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h, + NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s, + NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d, + NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes, + NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b, + NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h, + NEON_ST4_s = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s, + NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d +}; + +// NEON load/store single structure with post-index addressing. 
+enum NEONLoadStoreSingleStructPostIndexOp { + NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000, + NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000, + NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000, + NEONLoadStoreSingleStructPostIndex = 0x00800000, + NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex, + 
NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex, + + NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex, + NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex, + NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex, + NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex +}; + +// NEON modified immediate. +enum NEONModifiedImmediateOp { + NEONModifiedImmediateFixed = 0x0F000400, + NEONModifiedImmediateFMask = 0x9FF80400, + NEONModifiedImmediateOpBit = 0x20000000, + NEONModifiedImmediate_FMOV = NEONModifiedImmediateFixed | 0x00000800, + NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000, + NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000, + NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000, + NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000 +}; + +// NEON shift immediate. 
+enum NEONShiftImmediateOp { + NEONShiftImmediateFixed = 0x0F000400, + NEONShiftImmediateFMask = 0x9F800400, + NEONShiftImmediateMask = 0xBF80FC00, + NEONShiftImmediateUBit = 0x20000000, + NEON_SHL = NEONShiftImmediateFixed | 0x00005000, + NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000, + NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000, + NEON_SLI = NEONShiftImmediateFixed | 0x20005000, + NEON_SRI = NEONShiftImmediateFixed | 0x20004000, + NEON_SHRN = NEONShiftImmediateFixed | 0x00008000, + NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800, + NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000, + NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800, + NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000, + NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800, + NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000, + NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800, + NEON_SSHR = NEONShiftImmediateFixed | 0x00000000, + NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000, + NEON_USHR = NEONShiftImmediateFixed | 0x20000000, + NEON_URSHR = NEONShiftImmediateFixed | 0x20002000, + NEON_SSRA = NEONShiftImmediateFixed | 0x00001000, + NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000, + NEON_USRA = NEONShiftImmediateFixed | 0x20001000, + NEON_URSRA = NEONShiftImmediateFixed | 0x20003000, + NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000, + NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000, + NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000, + NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800, + NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800, + NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000, + NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000 +}; + +// NEON table. 
+enum NEONTableOp { + NEONTableFixed = 0x0E000000, + NEONTableFMask = 0xBF208C00, + NEONTableExt = 0x00001000, + NEONTableMask = 0xBF20FC00, + NEON_TBL_1v = NEONTableFixed | 0x00000000, + NEON_TBL_2v = NEONTableFixed | 0x00002000, + NEON_TBL_3v = NEONTableFixed | 0x00004000, + NEON_TBL_4v = NEONTableFixed | 0x00006000, + NEON_TBX_1v = NEON_TBL_1v | NEONTableExt, + NEON_TBX_2v = NEON_TBL_2v | NEONTableExt, + NEON_TBX_3v = NEON_TBL_3v | NEONTableExt, + NEON_TBX_4v = NEON_TBL_4v | NEONTableExt +}; + +// NEON perm. +enum NEONPermOp { + NEONPermFixed = 0x0E000800, + NEONPermFMask = 0xBF208C00, + NEONPermMask = 0x3F20FC00, + NEON_UZP1 = NEONPermFixed | 0x00001000, + NEON_TRN1 = NEONPermFixed | 0x00002000, + NEON_ZIP1 = NEONPermFixed | 0x00003000, + NEON_UZP2 = NEONPermFixed | 0x00005000, + NEON_TRN2 = NEONPermFixed | 0x00006000, + NEON_ZIP2 = NEONPermFixed | 0x00007000 +}; + +// NEON scalar instructions with two register operands. +enum NEONScalar2RegMiscOp { + NEONScalar2RegMiscFixed = 0x5E200800, + NEONScalar2RegMiscFMask = 0xDF3E0C00, + NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask, + NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero, + NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero, + NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero, + NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero, + NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero, + NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS, + NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS, + NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG, + NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG, + NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN, + NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN, + NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN, + NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD, + NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD, + + NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode, + 
NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode, + + NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000, + NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE, + NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE, + NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF, + NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF, + NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero, + NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero, + NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero, + NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero, + NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero, + NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000, + NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS, + NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU, + NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS, + NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU, + NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS, + NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU, + NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS, + NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU, + NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS, + NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU, + NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN +}; + +// NEON instructions with two register operands (FP16). 
+enum NEONScalar2RegMiscFP16Op { + NEONScalar2RegMiscFP16Fixed = 0x5E780800, + NEONScalar2RegMiscFP16FMask = 0xDF7E0C00, + NEONScalar2RegMiscFP16Mask = 0xFFFFFC00, + NEON_FCVTNS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNS_H, + NEON_FCVTMS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMS_H, + NEON_FCVTAS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAS_H, + NEON_SCVTF_H_scalar = NEON_Q | NEONScalar | NEON_SCVTF_H, + NEON_FCMGT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_H_zero, + NEON_FCMEQ_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_H_zero, + NEON_FCMLT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_H_zero, + NEON_FCVTPS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPS_H, + NEON_FCVTZS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_H, + NEON_FRECPE_H_scalar = NEON_Q | NEONScalar | NEON_FRECPE_H, + NEON_FRECPX_H_scalar = NEONScalar2RegMiscFP16Fixed | 0x0081F000, + NEON_FCVTNU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNU_H, + NEON_FCVTMU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMU_H, + NEON_FCVTAU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAU_H, + NEON_UCVTF_H_scalar = NEON_Q | NEONScalar | NEON_UCVTF_H, + NEON_FCMGE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_H_zero, + NEON_FCMLE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_H_zero, + NEON_FCVTPU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPU_H, + NEON_FCVTZU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_H, + NEON_FRSQRTE_H_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE_H +}; + +// NEON scalar instructions with three same-type operands. 
+enum NEONScalar3SameOp { + NEONScalar3SameFixed = 0x5E200400, + NEONScalar3SameFMask = 0xDF200400, + NEONScalar3SameMask = 0xFF20FC00, + NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD, + NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ, + NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE, + NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT, + NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI, + NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS, + NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST, + NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB, + NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD, + NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD, + NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB, + NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB, + NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL, + NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL, + NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL, + NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL, + NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL, + NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL, + NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL, + NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL, + NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH, + NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH, + + // NEON floating point scalar instructions with three same-type operands. 
+ NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000, + NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000, + NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000, + NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE, + NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT, + NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ, + NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE, + NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT, + NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX, + NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS, + NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS, + NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD +}; + +// NEON scalar FP16 instructions with three same-type operands. +enum NEONScalar3SameFP16Op { + NEONScalar3SameFP16Fixed = 0x5E400400, + NEONScalar3SameFP16FMask = 0xDF60C400, + NEONScalar3SameFP16Mask = 0xFFE0FC00, + NEON_FABD_H_scalar = NEON_Q | NEONScalar | NEON_FABD_H, + NEON_FMULX_H_scalar = NEON_Q | NEONScalar | NEON_FMULX_H, + NEON_FCMEQ_H_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_H, + NEON_FCMGE_H_scalar = NEON_Q | NEONScalar | NEON_FCMGE_H, + NEON_FCMGT_H_scalar = NEON_Q | NEONScalar | NEON_FCMGT_H, + NEON_FACGE_H_scalar = NEON_Q | NEONScalar | NEON_FACGE_H, + NEON_FACGT_H_scalar = NEON_Q | NEONScalar | NEON_FACGT_H, + NEON_FRECPS_H_scalar = NEON_Q | NEONScalar | NEON_FRECPS_H, + NEON_FRSQRTS_H_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS_H +}; + +// 'Extra' NEON scalar instructions with three same-type operands. +enum NEONScalar3SameExtraOp { + NEONScalar3SameExtraFixed = 0x5E008400, + NEONScalar3SameExtraFMask = 0xDF208400, + NEONScalar3SameExtraMask = 0xFF20FC00, + NEON_SQRDMLAH_scalar = NEON_Q | NEONScalar | NEON_SQRDMLAH, + NEON_SQRDMLSH_scalar = NEON_Q | NEONScalar | NEON_SQRDMLSH +}; + +// NEON scalar instructions with three different-type operands. 
+enum NEONScalar3DiffOp { + NEONScalar3DiffFixed = 0x5E200000, + NEONScalar3DiffFMask = 0xDF200C00, + NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask, + NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL, + NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL, + NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL +}; + +// NEON scalar instructions with indexed element operand. +enum NEONScalarByIndexedElementOp { + NEONScalarByIndexedElementFixed = 0x5F000000, + NEONScalarByIndexedElementFMask = 0xDF000400, + NEONScalarByIndexedElementMask = 0xFF00F400, + NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement, + NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement, + NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement, + NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement, + NEON_SQRDMULH_byelement_scalar + = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement, + NEON_SQRDMLAH_byelement_scalar + = NEON_Q | NEONScalar | NEON_SQRDMLAH_byelement, + NEON_SQRDMLSH_byelement_scalar + = NEON_Q | NEONScalar | NEON_SQRDMLSH_byelement, + NEON_FMLA_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_H_byelement, + NEON_FMLS_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_H_byelement, + NEON_FMUL_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_H_byelement, + NEON_FMULX_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_H_byelement, + + // Floating point instructions. 
+ NEONScalarByIndexedElementFPFixed + = NEONScalarByIndexedElementFixed | 0x00800000, + NEONScalarByIndexedElementFPMask + = NEONScalarByIndexedElementMask | 0x00800000, + NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement, + NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement, + NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement, + NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement +}; + +// NEON scalar register copy. +enum NEONScalarCopyOp { + NEONScalarCopyFixed = 0x5E000400, + NEONScalarCopyFMask = 0xDFE08400, + NEONScalarCopyMask = 0xFFE0FC00, + NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT +}; + +// NEON scalar pairwise instructions. +enum NEONScalarPairwiseOp { + NEONScalarPairwiseFixed = 0x5E300800, + NEONScalarPairwiseFMask = 0xDF3E0C00, + NEONScalarPairwiseMask = 0xFFB1F800, + NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000, + NEON_FMAXNMP_h_scalar = NEONScalarPairwiseFixed | 0x0000C000, + NEON_FADDP_h_scalar = NEONScalarPairwiseFixed | 0x0000D000, + NEON_FMAXP_h_scalar = NEONScalarPairwiseFixed | 0x0000F000, + NEON_FMINNMP_h_scalar = NEONScalarPairwiseFixed | 0x0080C000, + NEON_FMINP_h_scalar = NEONScalarPairwiseFixed | 0x0080F000, + NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000, + NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000, + NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000, + NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000, + NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000 +}; + +// NEON scalar shift immediate. 
+enum NEONScalarShiftImmediateOp { + NEONScalarShiftImmediateFixed = 0x5F000400, + NEONScalarShiftImmediateFMask = 0xDF800400, + NEONScalarShiftImmediateMask = 0xFF80FC00, + NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL, + NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI, + NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI, + NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR, + NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR, + NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR, + NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR, + NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA, + NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA, + NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA, + NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA, + NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN, + NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN, + NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN, + NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN, + NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN, + NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN, + NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU, + NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm, + NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm, + NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm, + NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm, + NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm, + NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm +}; + +// Unimplemented and unallocated instructions. These are defined to make fixed +// bit assertion easier. +enum UnimplementedOp { + UnimplementedFixed = 0x00000000, + UnimplementedFMask = 0x00000000 +}; + +enum UnallocatedOp { + UnallocatedFixed = 0x00000000, + UnallocatedFMask = 0x00000000 +}; + +// Re-enable `clang-format` after the `enum`s. 
+// clang-format on + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_CONSTANTS_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.cc new file mode 100644 index 00000000..97848653 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.cc @@ -0,0 +1,178 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "../utils-vixl.h" + +#include "cpu-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Initialise to smallest possible cache size. +unsigned CPU::dcache_line_size_ = 1; +unsigned CPU::icache_line_size_ = 1; + + +// Currently computes I and D cache line size. +void CPU::SetUp() { + uint32_t cache_type_register = GetCacheType(); + + // The cache type register holds information about the caches, including I + // D caches line size. + static const int kDCacheLineSizeShift = 16; + static const int kICacheLineSizeShift = 0; + static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift; + static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift; + + // The cache type register holds the size of the I and D caches in words as + // a power of two. + uint32_t dcache_line_size_power_of_two = + (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift; + uint32_t icache_line_size_power_of_two = + (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift; + + dcache_line_size_ = 4 << dcache_line_size_power_of_two; + icache_line_size_ = 4 << icache_line_size_power_of_two; +} + + +uint32_t CPU::GetCacheType() { +#ifdef __aarch64__ + uint64_t cache_type_register; + // Copy the content of the cache type register to a core register. + __asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT(runtime/references) + : [ctr] "=r"(cache_type_register)); + VIXL_ASSERT(IsUint32(cache_type_register)); + return static_cast(cache_type_register); +#else + // This will lead to a cache with 1 byte long lines, which is fine since + // neither EnsureIAndDCacheCoherency nor the simulator will need this + // information. + return 0; +#endif +} + + +void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) { +#ifdef __aarch64__ + // Implement the cache synchronisation for all targets where AArch64 is the + // host, even if we're building the simulator for an AAarch64 host. 
This + // allows for cases where the user wants to simulate code as well as run it + // natively. + + if (length == 0) { + return; + } + + // The code below assumes user space cache operations are allowed. + + // Work out the line sizes for each cache, and use them to determine the + // start addresses. + uintptr_t start = reinterpret_cast(address); + uintptr_t dsize = static_cast(dcache_line_size_); + uintptr_t isize = static_cast(icache_line_size_); + uintptr_t dline = start & ~(dsize - 1); + uintptr_t iline = start & ~(isize - 1); + + // Cache line sizes are always a power of 2. + VIXL_ASSERT(IsPowerOf2(dsize)); + VIXL_ASSERT(IsPowerOf2(isize)); + uintptr_t end = start + length; + + do { + __asm__ __volatile__( + // Clean each line of the D cache containing the target data. + // + // dc : Data Cache maintenance + // c : Clean + // va : by (Virtual) Address + // u : to the point of Unification + // The point of unification for a processor is the point by which the + // instruction and data caches are guaranteed to see the same copy of a + // memory location. See ARM DDI 0406B page B2-12 for more information. + " dc cvau, %[dline]\n" + : + : [dline] "r"(dline) + // This code does not write to memory, but the "memory" dependency + // prevents GCC from reordering the code. + : "memory"); + dline += dsize; + } while (dline < end); + + __asm__ __volatile__( + // Make sure that the data cache operations (above) complete before the + // instruction cache operations (below). + // + // dsb : Data Synchronisation Barrier + // ish : Inner SHareable domain + // + // The point of unification for an Inner Shareable shareability domain is + // the point by which the instruction and data caches of all the + // processors + // in that Inner Shareable shareability domain are guaranteed to see the + // same copy of a memory location. See ARM DDI 0406B page B2-12 for more + // information. 
+ " dsb ish\n" + : + : + : "memory"); + + do { + __asm__ __volatile__( + // Invalidate each line of the I cache containing the target data. + // + // ic : Instruction Cache maintenance + // i : Invalidate + // va : by Address + // u : to the point of Unification + " ic ivau, %[iline]\n" + : + : [iline] "r"(iline) + : "memory"); + iline += isize; + } while (iline < end); + + __asm__ __volatile__( + // Make sure that the instruction cache operations (above) take effect + // before the isb (below). + " dsb ish\n" + + // Ensure that any instructions already in the pipeline are discarded and + // reloaded from the new data. + // isb : Instruction Synchronisation Barrier + " isb\n" + : + : + : "memory"); +#else + // If the host isn't AArch64, we must be using the simulator, so this function + // doesn't have to do anything. + USE(address, length); +#endif +} + +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.h new file mode 100644 index 00000000..031fa42c --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-aarch64.h @@ -0,0 +1,86 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_CPU_AARCH64_H +#define VIXL_CPU_AARCH64_H + +#include "../globals-vixl.h" + +#include "instructions-aarch64.h" + +namespace vixl { +namespace aarch64 { + +class CPU { + public: + // Initialise CPU support. + static void SetUp(); + + // Ensures the data at a given address and with a given size is the same for + // the I and D caches. I and D caches are not automatically coherent on ARM + // so this operation is required before any dynamically generated code can + // safely run. + static void EnsureIAndDCacheCoherency(void *address, size_t length); + + // Handle tagged pointers. + template + static T SetPointerTag(T pointer, uint64_t tag) { + VIXL_ASSERT(IsUintN(kAddressTagWidth, tag)); + + // Use C-style casts to get static_cast behaviour for integral types (T), + // and reinterpret_cast behaviour for other types. 
+ + uint64_t raw = (uint64_t)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw)); + + raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset); + return (T)raw; + } + + template + static uint64_t GetPointerTag(T pointer) { + // Use C-style casts to get static_cast behaviour for integral types (T), + // and reinterpret_cast behaviour for other types. + + uint64_t raw = (uint64_t)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw)); + + return (raw & kAddressTagMask) >> kAddressTagOffset; + } + + private: + // Return the content of the cache type register. + static uint32_t GetCacheType(); + + // I and D cache line size in bytes. + static unsigned icache_line_size_; + static unsigned dcache_line_size_; +}; + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_CPU_AARCH64_H diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.cc new file mode 100644 index 00000000..75948ab4 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.cc @@ -0,0 +1,1165 @@ +// Copyright 2018, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Arm Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "../cpu-features.h" +#include "../globals-vixl.h" +#include "../utils-vixl.h" +#include "decoder-aarch64.h" + +#include "cpu-features-auditor-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Every instruction must update last_instruction_, even if only to clear it, +// and every instruction must also update seen_ once it has been fully handled. +// This scope makes that simple, and allows early returns in the decode logic. 
+class CPUFeaturesAuditor::RecordInstructionFeaturesScope { + public: + explicit RecordInstructionFeaturesScope(CPUFeaturesAuditor* auditor) + : auditor_(auditor) { + auditor_->last_instruction_ = CPUFeatures::None(); + } + ~RecordInstructionFeaturesScope() { + auditor_->seen_.Combine(auditor_->last_instruction_); + } + + void Record(const CPUFeatures& features) { + auditor_->last_instruction_.Combine(features); + } + + void Record(CPUFeatures::Feature feature0, + CPUFeatures::Feature feature1 = CPUFeatures::kNone, + CPUFeatures::Feature feature2 = CPUFeatures::kNone, + CPUFeatures::Feature feature3 = CPUFeatures::kNone) { + auditor_->last_instruction_.Combine(feature0, feature1, feature2, feature3); + } + + // If exactly one of a or b is known to be available, record it. Otherwise, + // record both. This is intended for encodings that can be provided by two + // different features. + void RecordOneOrBothOf(CPUFeatures::Feature a, CPUFeatures::Feature b) { + bool hint_a = auditor_->available_.Has(a); + bool hint_b = auditor_->available_.Has(b); + if (hint_a && !hint_b) { + Record(a); + } else if (hint_b && !hint_a) { + Record(b); + } else { + Record(a, b); + } + } + + private: + CPUFeaturesAuditor* auditor_; +}; + +void CPUFeaturesAuditor::LoadStoreHelper(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadStoreMask)) { + case LDR_b: + case LDR_q: + case STR_b: + case STR_q: + scope.Record(CPUFeatures::kNEON); + return; + case LDR_h: + case LDR_s: + case LDR_d: + case STR_h: + case STR_s: + case STR_d: + scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON); + return; + default: + // No special CPU features. 
+ return; + } +} + +void CPUFeaturesAuditor::LoadStorePairHelper(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadStorePairMask)) { + case LDP_q: + case STP_q: + scope.Record(CPUFeatures::kNEON); + return; + case LDP_s: + case LDP_d: + case STP_s: + case STP_d: { + scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON); + return; + } + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitAddSubExtended(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitAddSubImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitAddSubShifted(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitAddSubWithCarry(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitRotateRightIntoFlags(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(RotateRightIntoFlagsMask)) { + case RMIF: + scope.Record(CPUFeatures::kFlagM); + return; + } +} + +void CPUFeaturesAuditor::VisitEvaluateIntoFlags(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(EvaluateIntoFlagsMask)) { + case SETF8: + case SETF16: + scope.Record(CPUFeatures::kFlagM); + return; + } +} + +void CPUFeaturesAuditor::VisitAtomicMemory(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(AtomicMemoryMask)) { + case LDAPRB: + case LDAPRH: + case LDAPR_w: + case LDAPR_x: + scope.Record(CPUFeatures::kRCpc); + return; + default: + // Everything else belongs to the Atomics extension. 
+ scope.Record(CPUFeatures::kAtomics); + return; + } +} + +void CPUFeaturesAuditor::VisitBitfield(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCompareBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalCompareImmediate( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalCompareRegister( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitConditionalSelect(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCrypto2RegSHA(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCrypto3RegSHA(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitCryptoAES(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitDataProcessing1Source(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(DataProcessing1SourceMask)) { + case PACIA: + case PACIB: + case PACDA: + case PACDB: + case AUTIA: + case AUTIB: + case AUTDA: + case AUTDB: + case PACIZA: + case PACIZB: + case PACDZA: + case PACDZB: + case AUTIZA: + case AUTIZB: + case AUTDZA: + case AUTDZB: + case XPACI: + case XPACD: + scope.Record(CPUFeatures::kPAuth); + return; + default: + // No special CPU features. 
+ return; + } +} + +void CPUFeaturesAuditor::VisitDataProcessing2Source(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(DataProcessing2SourceMask)) { + case CRC32B: + case CRC32H: + case CRC32W: + case CRC32X: + case CRC32CB: + case CRC32CH: + case CRC32CW: + case CRC32CX: + scope.Record(CPUFeatures::kCRC32); + return; + case PACGA: + scope.Record(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitLoadStoreRCpcUnscaledOffset( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) { + case LDAPURB: + case LDAPURSB_w: + case LDAPURSB_x: + case LDAPURH: + case LDAPURSH_w: + case LDAPURSH_x: + case LDAPUR_w: + case LDAPURSW: + case LDAPUR_x: + scope.Record(CPUFeatures::kRCpc); + VIXL_FALLTHROUGH(); + case STLURB: + case STLURH: + case STLUR_w: + case STLUR_x: + scope.Record(CPUFeatures::kRCpcImm); + return; + } +} + +void CPUFeaturesAuditor::VisitLoadStorePAC(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); + scope.Record(CPUFeatures::kPAuth); +} + +void CPUFeaturesAuditor::VisitDataProcessing3Source(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitException(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitExtract(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitFPCompare(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. 
+ scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPCompareMask)) { + case FCMP_h: + case FCMP_h_zero: + case FCMPE_h: + case FCMPE_h_zero: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPConditionalCompare(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMP_h: + case FCCMPE_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPConditionalSelect(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + if (instr->Mask(FPConditionalSelectMask) == FCSEL_h) { + scope.Record(CPUFeatures::kFPHalf); + } +} + +void CPUFeaturesAuditor::VisitFPDataProcessing1Source( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPDataProcessing1SourceMask)) { + case FMOV_h: + case FABS_h: + case FNEG_h: + case FSQRT_h: + case FRINTN_h: + case FRINTP_h: + case FRINTM_h: + case FRINTZ_h: + case FRINTA_h: + case FRINTX_h: + case FRINTI_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + // This category includes some half-precision FCVT instructions that do + // not require FPHalf. + return; + } +} + +void CPUFeaturesAuditor::VisitFPDataProcessing2Source( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. 
+ scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPDataProcessing2SourceMask)) { + case FMUL_h: + case FDIV_h: + case FADD_h: + case FSUB_h: + case FMAX_h: + case FMIN_h: + case FMAXNM_h: + case FMINNM_h: + case FNMUL_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPDataProcessing3Source( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPDataProcessing3SourceMask)) { + case FMADD_h: + case FMSUB_h: + case FNMADD_h: + case FNMSUB_h: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPFixedPointConvert(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPFixedPointConvertMask)) { + case FCVTZS_wh_fixed: + case FCVTZS_xh_fixed: + case FCVTZU_wh_fixed: + case FCVTZU_xh_fixed: + case SCVTF_hw_fixed: + case SCVTF_hx_fixed: + case UCVTF_hw_fixed: + case UCVTF_hx_fixed: + scope.Record(CPUFeatures::kFPHalf); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitFPImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. + scope.Record(CPUFeatures::kFP); + if (instr->Mask(FPImmediateMask) == FMOV_h_imm) { + scope.Record(CPUFeatures::kFPHalf); + } +} + +void CPUFeaturesAuditor::VisitFPIntegerConvert(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require FP. 
+ scope.Record(CPUFeatures::kFP); + switch (instr->Mask(FPIntegerConvertMask)) { + case FCVTAS_wh: + case FCVTAS_xh: + case FCVTAU_wh: + case FCVTAU_xh: + case FCVTMS_wh: + case FCVTMS_xh: + case FCVTMU_wh: + case FCVTMU_xh: + case FCVTNS_wh: + case FCVTNS_xh: + case FCVTNU_wh: + case FCVTNU_xh: + case FCVTPS_wh: + case FCVTPS_xh: + case FCVTPU_wh: + case FCVTPU_xh: + case FCVTZS_wh: + case FCVTZS_xh: + case FCVTZU_wh: + case FCVTZU_xh: + case FMOV_hw: + case FMOV_hx: + case FMOV_wh: + case FMOV_xh: + case SCVTF_hw: + case SCVTF_hx: + case UCVTF_hw: + case UCVTF_hx: + scope.Record(CPUFeatures::kFPHalf); + return; + case FMOV_d1_x: + case FMOV_x_d1: + scope.Record(CPUFeatures::kNEON); + return; + case FJCVTZS: + scope.Record(CPUFeatures::kJSCVT); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitLoadLiteral(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadLiteralMask)) { + case LDR_s_lit: + case LDR_d_lit: + scope.RecordOneOrBothOf(CPUFeatures::kFP, CPUFeatures::kNEON); + return; + case LDR_q_lit: + scope.Record(CPUFeatures::kNEON); + return; + default: + // No special CPU features. 
+ return; + } +} + +void CPUFeaturesAuditor::VisitLoadStoreExclusive(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(LoadStoreExclusiveMask)) { + case CAS_w: + case CASA_w: + case CASL_w: + case CASAL_w: + case CAS_x: + case CASA_x: + case CASL_x: + case CASAL_x: + case CASB: + case CASAB: + case CASLB: + case CASALB: + case CASH: + case CASAH: + case CASLH: + case CASALH: + case CASP_w: + case CASPA_w: + case CASPL_w: + case CASPAL_w: + case CASP_x: + case CASPA_x: + case CASPL_x: + case CASPAL_x: + scope.Record(CPUFeatures::kAtomics); + return; + case STLLRB: + case LDLARB: + case STLLRH: + case LDLARH: + case STLLR_w: + case LDLAR_w: + case STLLR_x: + case LDLAR_x: + scope.Record(CPUFeatures::kLORegions); + return; + default: + // No special CPU features. + return; + } +} + +void CPUFeaturesAuditor::VisitLoadStorePairNonTemporal( + const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePairOffset(const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePairPostIndex(const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePairPreIndex(const Instruction* instr) { + LoadStorePairHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePostIndex(const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStorePreIndex(const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStoreRegisterOffset( + const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStoreUnscaledOffset( + const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLoadStoreUnsignedOffset( + const Instruction* instr) { + LoadStoreHelper(instr); +} + +void CPUFeaturesAuditor::VisitLogicalImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + 
USE(instr); +} + +void CPUFeaturesAuditor::VisitLogicalShifted(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitMoveWideImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEON2RegMisc(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: + case NEON_FNEG: + case NEON_FSQRT: + case NEON_FCVTL: + case NEON_FCVTN: + case NEON_FCVTXN: + case NEON_FRINTI: + case NEON_FRINTX: + case NEON_FRINTA: + case NEON_FRINTM: + case NEON_FRINTN: + case NEON_FRINTP: + case NEON_FRINTZ: + case NEON_FCVTNS: + case NEON_FCVTNU: + case NEON_FCVTPS: + case NEON_FCVTPU: + case NEON_FCVTMS: + case NEON_FCVTMU: + case NEON_FCVTZS: + case NEON_FCVTZU: + case NEON_FCVTAS: + case NEON_FCVTAU: + case NEON_SCVTF: + case NEON_UCVTF: + case NEON_FRSQRTE: + case NEON_FRECPE: + case NEON_FCMGT_zero: + case NEON_FCMGE_zero: + case NEON_FCMEQ_zero: + case NEON_FCMLE_zero: + case NEON_FCMLT_zero: + scope.Record(CPUFeatures::kFP); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEON2RegMiscFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEONHalf. + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEON3Different(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEON3Same(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
+ scope.Record(CPUFeatures::kNEON); + if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + scope.Record(CPUFeatures::kFP); + } + switch (instr->Mask(NEON3SameFHMMask)) { + case NEON_FMLAL: + case NEON_FMLAL2: + case NEON_FMLSL: + case NEON_FMLSL2: + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf, CPUFeatures::kFHM); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEON3SameExtra(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + if ((instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) || + (instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD)) { + scope.Record(CPUFeatures::kFP, CPUFeatures::kFcma); + if (instr->GetNEONSize() == 1) scope.Record(CPUFeatures::kNEONHalf); + } else { + switch (instr->Mask(NEON3SameExtraMask)) { + case NEON_SDOT: + case NEON_UDOT: + scope.Record(CPUFeatures::kDotProduct); + return; + case NEON_SQRDMLAH: + case NEON_SQRDMLSH: + scope.Record(CPUFeatures::kRDM); + return; + default: + // No additional features. + return; + } + } +} + +void CPUFeaturesAuditor::VisitNEON3SameFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON FP16 support. + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONAcrossLanes(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
+ scope.Record(CPUFeatures::kNEON); + if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) { + // FMAXV_H, FMINV_H, FMAXNMV_H, FMINNMV_H + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf); + } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + // FMAXV, FMINV, FMAXNMV, FMINNMV + scope.Record(CPUFeatures::kFP); + } +} + +void CPUFeaturesAuditor::VisitNEONByIndexedElement(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_SDOT_byelement: + case NEON_UDOT_byelement: + scope.Record(CPUFeatures::kDotProduct); + return; + case NEON_SQRDMLAH_byelement: + case NEON_SQRDMLSH_byelement: + scope.Record(CPUFeatures::kRDM); + return; + default: + // Fall through to check other instructions. + break; + } + switch (instr->Mask(NEONByIndexedElementFPLongMask)) { + case NEON_FMLAL_H_byelement: + case NEON_FMLAL2_H_byelement: + case NEON_FMLSL_H_byelement: + case NEON_FMLSL2_H_byelement: + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEONHalf, CPUFeatures::kFHM); + return; + default: + // Fall through to check other instructions. + break; + } + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMLA_H_byelement: + case NEON_FMLS_H_byelement: + case NEON_FMUL_H_byelement: + case NEON_FMULX_H_byelement: + scope.Record(CPUFeatures::kNEONHalf); + VIXL_FALLTHROUGH(); + case NEON_FMLA_byelement: + case NEON_FMLS_byelement: + case NEON_FMUL_byelement: + case NEON_FMULX_byelement: + scope.Record(CPUFeatures::kFP); + return; + default: + switch (instr->Mask(NEONByIndexedElementFPComplexMask)) { + case NEON_FCMLA_byelement: + scope.Record(CPUFeatures::kFP, CPUFeatures::kFcma); + if (instr->GetNEONSize() == 1) scope.Record(CPUFeatures::kNEONHalf); + return; + } + // No additional features. 
+ return; + } +} + +void CPUFeaturesAuditor::VisitNEONCopy(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONExtract(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreMultiStruct( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreSingleStruct( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONModifiedImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + if (instr->GetNEONCmode() == 0xf) { + // FMOV (vector, immediate), double-, single- or half-precision. + scope.Record(CPUFeatures::kFP); + if (instr->ExtractBit(11)) scope.Record(CPUFeatures::kNEONHalf); + } + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONPerm(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
+ scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar2RegMisc(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRECPE_scalar: + case NEON_FRECPX_scalar: + case NEON_FRSQRTE_scalar: + case NEON_FCMGT_zero_scalar: + case NEON_FCMGE_zero_scalar: + case NEON_FCMEQ_zero_scalar: + case NEON_FCMLE_zero_scalar: + case NEON_FCMLT_zero_scalar: + case NEON_SCVTF_scalar: + case NEON_UCVTF_scalar: + case NEON_FCVTNS_scalar: + case NEON_FCVTNU_scalar: + case NEON_FCVTPS_scalar: + case NEON_FCVTPU_scalar: + case NEON_FCVTMS_scalar: + case NEON_FCVTMU_scalar: + case NEON_FCVTZS_scalar: + case NEON_FCVTZU_scalar: + case NEON_FCVTAS_scalar: + case NEON_FCVTAU_scalar: + case NEON_FCVTXN_scalar: + scope.Record(CPUFeatures::kFP); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONScalar2RegMiscFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEONHalf. + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar3Diff(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar3Same(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + scope.Record(CPUFeatures::kFP); + } +} + +void CPUFeaturesAuditor::VisitNEONScalar3SameExtra(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON and RDM. 
+ scope.Record(CPUFeatures::kNEON, CPUFeatures::kRDM); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalar3SameFP16(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEONHalf. + scope.Record(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kNEONHalf); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalarByIndexedElement( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQRDMLAH_byelement_scalar: + case NEON_SQRDMLSH_byelement_scalar: + scope.Record(CPUFeatures::kRDM); + return; + default: + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMLA_H_byelement_scalar: + case NEON_FMLS_H_byelement_scalar: + case NEON_FMUL_H_byelement_scalar: + case NEON_FMULX_H_byelement_scalar: + scope.Record(CPUFeatures::kNEONHalf); + VIXL_FALLTHROUGH(); + case NEON_FMLA_byelement_scalar: + case NEON_FMLS_byelement_scalar: + case NEON_FMUL_byelement_scalar: + case NEON_FMULX_byelement_scalar: + scope.Record(CPUFeatures::kFP); + return; + } + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONScalarCopy(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitNEONScalarPairwise(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
+ scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_FMAXNMP_h_scalar: + case NEON_FADDP_h_scalar: + case NEON_FMAXP_h_scalar: + case NEON_FMINNMP_h_scalar: + case NEON_FMINP_h_scalar: + scope.Record(CPUFeatures::kNEONHalf); + VIXL_FALLTHROUGH(); + case NEON_FADDP_scalar: + case NEON_FMAXP_scalar: + case NEON_FMAXNMP_scalar: + case NEON_FMINP_scalar: + case NEON_FMINNMP_scalar: + scope.Record(CPUFeatures::kFP); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONScalarShiftImmediate( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_FCVTZS_imm_scalar: + case NEON_FCVTZU_imm_scalar: + case NEON_SCVTF_imm_scalar: + case NEON_UCVTF_imm_scalar: + scope.Record(CPUFeatures::kFP); + // If immh is 0b001x then the data type is FP16, and requires kNEONHalf. + if ((instr->GetImmNEONImmh() & 0xe) == 0x2) { + scope.Record(CPUFeatures::kNEONHalf); + } + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONShiftImmediate(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. + scope.Record(CPUFeatures::kNEON); + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SCVTF_imm: + case NEON_UCVTF_imm: + case NEON_FCVTZS_imm: + case NEON_FCVTZU_imm: + scope.Record(CPUFeatures::kFP); + // If immh is 0b001x then the data type is FP16, and requires kNEONHalf. + if ((instr->GetImmNEONImmh() & 0xe) == 0x2) { + scope.Record(CPUFeatures::kNEONHalf); + } + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitNEONTable(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + // All of these instructions require NEON. 
+ scope.Record(CPUFeatures::kNEON); + USE(instr); +} + +void CPUFeaturesAuditor::VisitPCRelAddressing(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitSystem(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + CPUFeatures required; + switch (instr->GetInstructionBits()) { + case PACIA1716: + case PACIB1716: + case AUTIA1716: + case AUTIB1716: + case PACIAZ: + case PACIASP: + case PACIBZ: + case PACIBSP: + case AUTIAZ: + case AUTIASP: + case AUTIBZ: + case AUTIBSP: + case XPACLRI: + required.Combine(CPUFeatures::kPAuth); + break; + default: + switch (instr->GetImmHint()) { + case ESB: + required.Combine(CPUFeatures::kRAS); + break; + case BTI: + case BTI_j: + case BTI_c: + case BTI_jc: + required.Combine(CPUFeatures::kBTI); + break; + default: + break; + } + break; + } + + // These are all HINT instructions, and behave as NOPs if the corresponding + // features are not implemented, so we record the corresponding features + // only if they are available. + if (available_.Has(required)) scope.Record(required); + } else if (instr->Mask(SystemSysMask) == SYS) { + switch (instr->GetSysOp()) { + // DC instruction variants. + case CVAP: + scope.Record(CPUFeatures::kDCPoP); + break; + case IVAU: + case CVAC: + case CVAU: + case CIVAC: + // No special CPU features. 
+ break; + } + } else if (instr->Mask(SystemPStateFMask) == SystemPStateFixed) { + switch (instr->Mask(SystemPStateMask)) { + case CFINV: + scope.Record(CPUFeatures::kFlagM); + break; + case AXFLAG: + case XAFLAG: + scope.Record(CPUFeatures::kAXFlag); + break; + } + } +} + +void CPUFeaturesAuditor::VisitTestBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitUnallocated(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitUnconditionalBranch(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + +void CPUFeaturesAuditor::VisitUnconditionalBranchToRegister( + const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BRAAZ: + case BRABZ: + case BLRAAZ: + case BLRABZ: + case RETAA: + case RETAB: + case BRAA: + case BRAB: + case BLRAA: + case BLRAB: + scope.Record(CPUFeatures::kPAuth); + return; + default: + // No additional features. + return; + } +} + +void CPUFeaturesAuditor::VisitUnimplemented(const Instruction* instr) { + RecordInstructionFeaturesScope scope(this); + USE(instr); +} + + +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.h new file mode 100644 index 00000000..9f034778 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/cpu-features-auditor-aarch64.h @@ -0,0 +1,125 @@ +// Copyright 2018, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Arm Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ +#define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ + +#include + +#include "../cpu-features.h" +#include "decoder-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// This visitor records the CPU features that each decoded instruction requires. 
+// It provides: +// - the set of CPU features required by the most recently decoded instruction, +// - a cumulative set of encountered CPU features, +// - an optional list of 'available' CPU features. +// +// Primarily, this allows the Disassembler and Simulator to share the same CPU +// features logic. However, it can be used standalone to scan code blocks for +// CPU features. +class CPUFeaturesAuditor : public DecoderVisitor { + public: + // Construction arguments: + // - If a decoder is specified, the CPUFeaturesAuditor automatically + // registers itself as a visitor. Otherwise, this can be done manually. + // + // - If an `available` features list is provided, it is used as a hint in + // cases where instructions may be provided by multiple separate features. + // An example of this is FP&SIMD loads and stores: some of these are used + // in both FP and integer SIMD code. If exactly one of those features is + // in `available` when one of these instructions is encountered, then the + // auditor will record that feature. Otherwise, it will record _both_ + // features. + explicit CPUFeaturesAuditor( + Decoder* decoder, const CPUFeatures& available = CPUFeatures::None()) + : available_(available), decoder_(decoder) { + if (decoder_ != NULL) decoder_->AppendVisitor(this); + } + + explicit CPUFeaturesAuditor( + const CPUFeatures& available = CPUFeatures::None()) + : available_(available), decoder_(NULL) {} + + virtual ~CPUFeaturesAuditor() { + if (decoder_ != NULL) decoder_->RemoveVisitor(this); + } + + void ResetSeenFeatures() { + seen_ = CPUFeatures::None(); + last_instruction_ = CPUFeatures::None(); + } + + // Query or set available CPUFeatures. + const CPUFeatures& GetAvailableFeatures() const { return available_; } + void SetAvailableFeatures(const CPUFeatures& available) { + available_ = available; + } + + // Query CPUFeatures seen since construction (or the last call to `Reset()`). 
+ const CPUFeatures& GetSeenFeatures() const { return seen_; } + + // Query CPUFeatures from the last instruction visited by this auditor. + const CPUFeatures& GetInstructionFeatures() const { + return last_instruction_; + } + + bool InstructionIsAvailable() const { + return available_.Has(last_instruction_); + } + + // The common CPUFeatures interface operates on the available_ list. + CPUFeatures* GetCPUFeatures() { return &available_; } + void SetCPUFeatures(const CPUFeatures& available) { + SetAvailableFeatures(available); + } + +// Declare all Visitor functions. +#define DECLARE(A) \ + virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + VISITOR_LIST(DECLARE) +#undef DECLARE + + private: + class RecordInstructionFeaturesScope; + + void LoadStoreHelper(const Instruction* instr); + void LoadStorePairHelper(const Instruction* instr); + + CPUFeatures seen_; + CPUFeatures last_instruction_; + CPUFeatures available_; + + Decoder* decoder_; +}; + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.cc new file mode 100644 index 00000000..a01890ce --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.cc @@ -0,0 +1,1067 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "decoder-aarch64.h" + +namespace vixl { +namespace aarch64 { + +void Decoder::DecodeInstruction(const Instruction* instr) { + if (instr->ExtractBits(28, 27) == 0) { + VisitUnallocated(instr); + } else { + switch (instr->ExtractBits(27, 24)) { + // 0: PC relative addressing. + case 0x0: + DecodePCRelAddressing(instr); + break; + + // 1: Add/sub immediate. + case 0x1: + DecodeAddSubImmediate(instr); + break; + + // A: Logical shifted register. + // Add/sub with carry. + // Conditional compare register. + // Conditional compare immediate. + // Conditional select. + // Data processing 1 source. + // Data processing 2 source. + // B: Add/sub shifted register. + // Add/sub extended register. + // Data processing 3 source. + case 0xA: + case 0xB: + DecodeDataProcessing(instr); + break; + + // 2: Logical immediate. + // Move wide immediate. + case 0x2: + DecodeLogical(instr); + break; + + // 3: Bitfield. + // Extract. 
+ case 0x3: + DecodeBitfieldExtract(instr); + break; + + // 4: Unconditional branch immediate. + // Exception generation. + // Compare and branch immediate. + // 5: Compare and branch immediate. + // Conditional branch. + // System. + // 6,7: Unconditional branch. + // Test and branch immediate. + case 0x4: + case 0x5: + case 0x6: + case 0x7: + DecodeBranchSystemException(instr); + break; + + // 8,9: Load/store register pair post-index. + // Load register literal. + // Load/store register unscaled immediate. + // Load/store register immediate post-index. + // Load/store register immediate pre-index. + // Load/store register offset. + // Load/store exclusive. + // C,D: Load/store register pair offset. + // Load/store register pair pre-index. + // Load/store register unsigned immediate. + // Advanced SIMD. + case 0x8: + case 0x9: + case 0xC: + case 0xD: + DecodeLoadStore(instr); + break; + + // E: FP fixed point conversion. + // FP integer conversion. + // FP data processing 1 source. + // FP compare. + // FP immediate. + // FP data processing 2 source. + // FP conditional compare. + // FP conditional select. + // Advanced SIMD. + // F: FP data processing 3 source. + // Advanced SIMD. + case 0xE: + case 0xF: + DecodeFP(instr); + break; + } + } +} + +void Decoder::AppendVisitor(DecoderVisitor* new_visitor) { + visitors_.push_back(new_visitor); +} + + +void Decoder::PrependVisitor(DecoderVisitor* new_visitor) { + visitors_.push_front(new_visitor); +} + + +void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor) { + std::list::iterator it; + for (it = visitors_.begin(); it != visitors_.end(); it++) { + if (*it == registered_visitor) { + visitors_.insert(it, new_visitor); + return; + } + } + // We reached the end of the list. The last element must be + // registered_visitor. 
+ VIXL_ASSERT(*it == registered_visitor); + visitors_.insert(it, new_visitor); +} + + +void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor) { + std::list::iterator it; + for (it = visitors_.begin(); it != visitors_.end(); it++) { + if (*it == registered_visitor) { + it++; + visitors_.insert(it, new_visitor); + return; + } + } + // We reached the end of the list. The last element must be + // registered_visitor. + VIXL_ASSERT(*it == registered_visitor); + visitors_.push_back(new_visitor); +} + + +void Decoder::RemoveVisitor(DecoderVisitor* visitor) { + visitors_.remove(visitor); +} + + +void Decoder::DecodePCRelAddressing(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x0); + // We know bit 28 is set, as = 0 is filtered out at the top level + // decode. + VIXL_ASSERT(instr->ExtractBit(28) == 0x1); + VisitPCRelAddressing(instr); +} + + +void Decoder::DecodeBranchSystemException(const Instruction* instr) { + VIXL_ASSERT((instr->ExtractBits(27, 24) == 0x4) || + (instr->ExtractBits(27, 24) == 0x5) || + (instr->ExtractBits(27, 24) == 0x6) || + (instr->ExtractBits(27, 24) == 0x7)); + + switch (instr->ExtractBits(31, 29)) { + case 0: + case 4: { + VisitUnconditionalBranch(instr); + break; + } + case 1: + case 5: { + if (instr->ExtractBit(25) == 0) { + VisitCompareBranch(instr); + } else { + VisitTestBranch(instr); + } + break; + } + case 2: { + if (instr->ExtractBit(25) == 0) { + if ((instr->ExtractBit(24) == 0x1) || + (instr->Mask(0x01000010) == 0x00000010)) { + VisitUnallocated(instr); + } else { + VisitConditionalBranch(instr); + } + } else { + VisitUnallocated(instr); + } + break; + } + case 6: { + if (instr->ExtractBit(25) == 0) { + if (instr->ExtractBit(24) == 0) { + if ((instr->ExtractBits(4, 2) != 0) || + (instr->Mask(0x00E0001D) == 0x00200001) || + (instr->Mask(0x00E0001D) == 0x00400001) || + (instr->Mask(0x00E0001E) == 0x00200002) || + (instr->Mask(0x00E0001E) == 0x00400002) || + 
(instr->Mask(0x00E0001C) == 0x00600000) || + (instr->Mask(0x00E0001C) == 0x00800000) || + (instr->Mask(0x00E0001F) == 0x00A00000) || + (instr->Mask(0x00C0001C) == 0x00C00000)) { + VisitUnallocated(instr); + } else { + VisitException(instr); + } + } else { + if (instr->ExtractBits(23, 22) == 0) { + const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0); + if ((instr->ExtractBits(21, 19) == 0x4) || + (masked_003FF0E0 == 0x00033000) || + (masked_003FF0E0 == 0x003FF020) || + (masked_003FF0E0 == 0x003FF060) || + (masked_003FF0E0 == 0x003FF0E0) || + (instr->Mask(0x00388000) == 0x00008000) || + (instr->Mask(0x0038E000) == 0x00000000) || + (instr->Mask(0x0039E000) == 0x00002000) || + (instr->Mask(0x003AE000) == 0x00002000) || + (instr->Mask(0x003CE000) == 0x00042000) || + (instr->Mask(0x0038F000) == 0x00005000) || + (instr->Mask(0x0038E000) == 0x00006000)) { + VisitUnallocated(instr); + } else { + VisitSystem(instr); + } + } else { + VisitUnallocated(instr); + } + } + } else { + if (((instr->ExtractBit(24) == 0x1) && + (instr->ExtractBits(23, 21) > 0x1)) || + (instr->ExtractBits(20, 16) != 0x1F) || + (instr->ExtractBits(15, 10) == 0x1) || + (instr->ExtractBits(15, 10) > 0x3) || + (instr->ExtractBits(24, 21) == 0x3) || + (instr->ExtractBits(24, 22) == 0x3)) { + VisitUnallocated(instr); + } else { + VisitUnconditionalBranchToRegister(instr); + } + } + break; + } + case 3: + case 7: { + VisitUnallocated(instr); + break; + } + } +} + + +void Decoder::DecodeLoadStore(const Instruction* instr) { + VIXL_ASSERT((instr->ExtractBits(27, 24) == 0x8) || + (instr->ExtractBits(27, 24) == 0x9) || + (instr->ExtractBits(27, 24) == 0xC) || + (instr->ExtractBits(27, 24) == 0xD)); + // TODO(all): rearrange the tree to integrate this branch. 
+ if ((instr->ExtractBit(28) == 0) && (instr->ExtractBit(29) == 0) && + (instr->ExtractBit(26) == 1)) { + DecodeNEONLoadStore(instr); + return; + } + + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(28) == 0) { + if (instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(26) == 0) { + VisitLoadStoreExclusive(instr); + } else { + VIXL_UNREACHABLE(); + } + } else { + if ((instr->ExtractBits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(23) == 0) { + if (instr->Mask(0xC4400000) == 0xC0400000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePairNonTemporal(instr); + } + } else { + VisitLoadStorePairPostIndex(instr); + } + } + } + } else { + if (instr->ExtractBit(29) == 0) { + if (instr->Mask(0xC4000000) == 0xC4000000) { + VisitUnallocated(instr); + } else { + VisitLoadLiteral(instr); + } + } else { + if ((instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(21) == 0) { + switch (instr->ExtractBits(11, 10)) { + case 0: { + VisitLoadStoreUnscaledOffset(instr); + break; + } + case 1: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePostIndex(instr); + } + break; + } + case 2: { + // TODO: VisitLoadStoreRegisterOffsetUnpriv. 
+ VisitUnimplemented(instr); + break; + } + case 3: { + if (instr->Mask(0xC4C00000) == 0xC0800000) { + VisitUnallocated(instr); + } else { + VisitLoadStorePreIndex(instr); + } + break; + } + } + } else { + if (instr->ExtractBits(11, 10) == 0x2) { + if (instr->ExtractBit(14) == 0) { + VisitUnallocated(instr); + } else { + VisitLoadStoreRegisterOffset(instr); + } + } else { + if (instr->ExtractBits(11, 10) == 0x0) { + if (instr->ExtractBit(25) == 0) { + if (instr->ExtractBit(26) == 0) { + if ((instr->ExtractBit(15) == 1) && + ((instr->ExtractBits(14, 12) == 0x1) || + (instr->ExtractBit(13) == 1) || + (instr->ExtractBits(14, 12) == 0x5) || + ((instr->ExtractBits(14, 12) == 0x4) && + ((instr->ExtractBit(23) == 0) || + (instr->ExtractBits(23, 22) == 0x3))))) { + VisitUnallocated(instr); + } else { + VisitAtomicMemory(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->ExtractBit(25) == 0) { + if (instr->ExtractBit(26) == 0) { + if (instr->ExtractBits(31, 30) == 0x3) { + VisitLoadStorePAC(instr); + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } + } + } + } + } else { + if (instr->ExtractBit(28) == 0) { + if (instr->ExtractBit(29) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->ExtractBits(31, 30) == 0x3) || + (instr->Mask(0xC4400000) == 0x40000000)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(23) == 0) { + VisitLoadStorePairOffset(instr); + } else { + VisitLoadStorePairPreIndex(instr); + } + } + } + } else { + if (instr->ExtractBit(29) == 0) { + if ((instr->ExtractBit(26) == 0) && (instr->ExtractBit(21) == 0) && + (instr->ExtractBits(11, 10) == 0x0) && + ((instr->ExtractBits(31, 30) < 0x2) || + ((instr->ExtractBits(31, 30) == 0x2) && + (instr->ExtractBits(23, 22) != 0x3)) || + ((instr->ExtractBits(31, 30) == 0x3) && + (instr->ExtractBits(23, 22) < 0x2)))) { + 
VisitLoadStoreRCpcUnscaledOffset(instr); + } else { + VisitUnallocated(instr); + } + } else { + if ((instr->Mask(0x84C00000) == 0x80C00000) || + (instr->Mask(0x44800000) == 0x44800000) || + (instr->Mask(0x84800000) == 0x84800000)) { + VisitUnallocated(instr); + } else { + VisitLoadStoreUnsignedOffset(instr); + } + } + } + } +} + + +void Decoder::DecodeLogical(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x2); + + if (instr->Mask(0x80400000) == 0x00400000) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(23) == 0) { + VisitLogicalImmediate(instr); + } else { + if (instr->ExtractBits(30, 29) == 0x1) { + VisitUnallocated(instr); + } else { + VisitMoveWideImmediate(instr); + } + } + } +} + + +void Decoder::DecodeBitfieldExtract(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x3); + + if ((instr->Mask(0x80400000) == 0x80000000) || + (instr->Mask(0x80400000) == 0x00400000) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else if (instr->ExtractBit(23) == 0) { + if ((instr->Mask(0x80200000) == 0x00200000) || + (instr->Mask(0x60000000) == 0x60000000)) { + VisitUnallocated(instr); + } else { + VisitBitfield(instr); + } + } else { + if ((instr->Mask(0x60200000) == 0x00200000) || + (instr->Mask(0x60000000) != 0x00000000)) { + VisitUnallocated(instr); + } else { + VisitExtract(instr); + } + } +} + + +void Decoder::DecodeAddSubImmediate(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(27, 24) == 0x1); + if (instr->ExtractBit(23) == 1) { + VisitUnallocated(instr); + } else { + VisitAddSubImmediate(instr); + } +} + + +void Decoder::DecodeDataProcessing(const Instruction* instr) { + VIXL_ASSERT((instr->ExtractBits(27, 24) == 0xA) || + (instr->ExtractBits(27, 24) == 0xB)); + + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(28) == 0) { + if (instr->Mask(0x80008000) == 0x00008000) { + VisitUnallocated(instr); + } else { + VisitLogicalShifted(instr); + } + } else { + 
switch (instr->ExtractBits(23, 21)) { + case 0: { + if (instr->ExtractBits(15, 10) != 0) { + if (instr->ExtractBits(14, 10) == 0x1) { + if (instr->Mask(0xE0000010) == 0xA0000000) { + VisitRotateRightIntoFlags(instr); + } else { + VisitUnallocated(instr); + } + } else { + if (instr->ExtractBits(13, 10) == 0x2) { + if (instr->Mask(0xE01F801F) == 0x2000000D) { + VisitEvaluateIntoFlags(instr); + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } + } else { + VisitAddSubWithCarry(instr); + } + break; + } + case 2: { + if ((instr->ExtractBit(29) == 0) || (instr->Mask(0x00000410) != 0)) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(11) == 0) { + VisitConditionalCompareRegister(instr); + } else { + VisitConditionalCompareImmediate(instr); + } + } + break; + } + case 4: { + if (instr->Mask(0x20000800) != 0x00000000) { + VisitUnallocated(instr); + } else { + VisitConditionalSelect(instr); + } + break; + } + case 6: { + if (instr->ExtractBit(29) == 0x1) { + VisitUnallocated(instr); + VIXL_FALLTHROUGH(); + } else { + if (instr->ExtractBit(30) == 0) { + if ((instr->ExtractBit(15) == 0x1) || + (instr->ExtractBits(15, 11) == 0) || + (instr->ExtractBits(15, 12) == 0x1) || + ((instr->ExtractBits(15, 12) == 0x3) && + (instr->ExtractBit(31) == 0)) || + (instr->ExtractBits(15, 13) == 0x3) || + (instr->Mask(0x8000EC00) == 0x00004C00) || + (instr->Mask(0x8000E800) == 0x80004000) || + (instr->Mask(0x8000E400) == 0x80004000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing2Source(instr); + } + } else { + if ((instr->ExtractBits(20, 17) != 0) || + (instr->ExtractBit(15) == 1) || + ((instr->ExtractBit(16) == 1) && + ((instr->ExtractBits(14, 10) > 17) || + (instr->ExtractBit(31) == 0))) || + ((instr->ExtractBit(16) == 0) && + ((instr->ExtractBits(14, 13) != 0) || + (instr->Mask(0xA01FFC00) == 0x00000C00) || + (instr->Mask(0x201FF800) == 0x00001800)))) { + VisitUnallocated(instr); + } else { + 
VisitDataProcessing1Source(instr); + } + } + break; + } + } + case 1: + case 3: + case 5: + case 7: + VisitUnallocated(instr); + break; + } + } + } else { + if (instr->ExtractBit(28) == 0) { + if (instr->ExtractBit(21) == 0) { + if ((instr->ExtractBits(23, 22) == 0x3) || + (instr->Mask(0x80008000) == 0x00008000)) { + VisitUnallocated(instr); + } else { + VisitAddSubShifted(instr); + } + } else { + if ((instr->Mask(0x00C00000) != 0x00000000) || + (instr->Mask(0x00001400) == 0x00001400) || + (instr->Mask(0x00001800) == 0x00001800)) { + VisitUnallocated(instr); + } else { + VisitAddSubExtended(instr); + } + } + } else { + if ((instr->ExtractBit(30) == 0x1) || + (instr->ExtractBits(30, 29) == 0x1) || + (instr->Mask(0xE0600000) == 0x00200000) || + (instr->Mask(0xE0608000) == 0x00400000) || + (instr->Mask(0x60608000) == 0x00408000) || + (instr->Mask(0x60E00000) == 0x00E00000) || + (instr->Mask(0x60E00000) == 0x00800000) || + (instr->Mask(0x60E00000) == 0x00600000)) { + VisitUnallocated(instr); + } else { + VisitDataProcessing3Source(instr); + } + } + } +} + + +void Decoder::DecodeFP(const Instruction* instr) { + VIXL_ASSERT((instr->ExtractBits(27, 24) == 0xE) || + (instr->ExtractBits(27, 24) == 0xF)); + if (instr->ExtractBit(28) == 0) { + DecodeNEONVectorDataProcessing(instr); + } else { + if (instr->ExtractBits(31, 30) == 0x3) { + VisitUnallocated(instr); + } else if (instr->ExtractBits(31, 30) == 0x1) { + DecodeNEONScalarDataProcessing(instr); + } else { + if (instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(21) == 0) { + if ((instr->ExtractBits(23, 22) == 0x2) || + (instr->ExtractBit(18) == 1) || + (instr->Mask(0x80008000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x00000000) || + (instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x00160000) == 0x00000000) || + (instr->Mask(0x00160000) == 0x00120000)) { + VisitUnallocated(instr); + } else { + VisitFPFixedPointConvert(instr); + } + } else { + if 
(instr->ExtractBits(15, 10) == 32) { + VisitUnallocated(instr); + } else if (instr->ExtractBits(15, 10) == 0) { + if ((instr->Mask(0x000E0000) == 0x000A0000) || + (instr->Mask(0x000E0000) == 0x000C0000) || + (instr->Mask(0x00160000) == 0x00120000) || + (instr->Mask(0x00160000) == 0x00140000) || + (instr->Mask(0x20C40000) == 0x00800000) || + (instr->Mask(0x20C60000) == 0x00840000) || + (instr->Mask(0xA0C60000) == 0x80060000) || + (instr->Mask(0xA0C60000) == 0x00860000) || + (instr->Mask(0xA0CE0000) == 0x80860000) || + (instr->Mask(0xA0CE0000) == 0x804E0000) || + (instr->Mask(0xA0CE0000) == 0x000E0000) || + (instr->Mask(0xA0D60000) == 0x00160000) || + (instr->Mask(0xA0D60000) == 0x80560000) || + (instr->Mask(0xA0D60000) == 0x80960000)) { + VisitUnallocated(instr); + } else { + VisitFPIntegerConvert(instr); + } + } else if (instr->ExtractBits(14, 10) == 16) { + const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000); + if ((instr->Mask(0x80180000) != 0) || + (masked_A0DF8000 == 0x00020000) || + (masked_A0DF8000 == 0x00030000) || + (masked_A0DF8000 == 0x00068000) || + (masked_A0DF8000 == 0x00428000) || + (masked_A0DF8000 == 0x00430000) || + (masked_A0DF8000 == 0x00468000) || + (instr->Mask(0xA0D80000) == 0x00800000) || + (instr->Mask(0xA0DF0000) == 0x00C30000) || + (instr->Mask(0xA0DF8000) == 0x00C68000)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing1Source(instr); + } + } else if (instr->ExtractBits(13, 10) == 8) { + if ((instr->ExtractBits(15, 14) != 0) || + (instr->ExtractBits(2, 0) != 0) || + (instr->ExtractBit(31) == 1) || + (instr->ExtractBits(23, 22) == 0x2)) { + VisitUnallocated(instr); + } else { + VisitFPCompare(instr); + } + } else if (instr->ExtractBits(12, 10) == 4) { + if ((instr->ExtractBits(9, 5) != 0) || + // Valid enc: 01d, 00s, 11h. 
+ (instr->ExtractBits(23, 22) == 0x2) || + (instr->ExtractBit(31) == 1)) { + VisitUnallocated(instr); + } else { + VisitFPImmediate(instr); + } + } else { + if ((instr->ExtractBits(23, 22) == 0x2) || + (instr->ExtractBit(31) == 1)) { + VisitUnallocated(instr); + } else { + switch (instr->ExtractBits(11, 10)) { + case 1: { + VisitFPConditionalCompare(instr); + break; + } + case 2: { + if (instr->ExtractBits(15, 12) > 0x8) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing2Source(instr); + } + break; + } + case 3: { + VisitFPConditionalSelect(instr); + break; + } + default: + VIXL_UNREACHABLE(); + } + } + } + } + } else { + // Bit 30 == 1 has been handled earlier. + VIXL_ASSERT(instr->ExtractBit(30) == 0); + if ((instr->Mask(0xA0000000) != 0) || + (instr->ExtractBits(23, 22) == 0x2)) { + VisitUnallocated(instr); + } else { + VisitFPDataProcessing3Source(instr); + } + } + } else { + VisitUnallocated(instr); + } + } + } +} + + +void Decoder::DecodeNEONLoadStore(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(29, 25) == 0x6); + if (instr->ExtractBit(31) == 0) { + if ((instr->ExtractBit(24) == 0) && (instr->ExtractBit(21) == 1)) { + VisitUnallocated(instr); + return; + } + + if (instr->ExtractBit(23) == 0) { + if (instr->ExtractBits(20, 16) == 0) { + if (instr->ExtractBit(24) == 0) { + VisitNEONLoadStoreMultiStruct(instr); + } else { + VisitNEONLoadStoreSingleStruct(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->ExtractBit(24) == 0) { + VisitNEONLoadStoreMultiStructPostIndex(instr); + } else { + VisitNEONLoadStoreSingleStructPostIndex(instr); + } + } + } else { + VisitUnallocated(instr); + } +} + + +void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(28, 25) == 0x7); + if (instr->ExtractBit(31) == 0) { + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(21) == 0) { + if (instr->ExtractBit(15) == 0) { + if (instr->ExtractBit(10) == 0) { + if 
(instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitNEONTable(instr); + } else { + VisitNEONPerm(instr); + } + } else { + VisitNEONExtract(instr); + } + } else { + if (instr->ExtractBits(23, 22) == 0) { + VisitNEONCopy(instr); + } else if (instr->ExtractBit(14) == 0x0 && + instr->ExtractBit(22) == 0x1) { + // U + a + opcode. + uint8_t decode_field = + (instr->ExtractBit(29) << 1) | instr->ExtractBit(23); + decode_field = (decode_field << 3) | instr->ExtractBits(13, 11); + switch (decode_field) { + case 0x5: + case 0xB: + case 0xC: + case 0xD: + case 0x11: + case 0x19: + case 0x1B: + case 0x1F: + VisitUnallocated(instr); + break; + default: + VisitNEON3SameFP16(instr); + break; + } + } else { + VisitUnallocated(instr); + } + } + } else if (instr->ExtractBit(10) == 0) { + VisitUnallocated(instr); + } else if ((instr->ExtractBits(14, 11) == 0x3) || + (instr->ExtractBits(14, 13) == 0x1)) { + // opcode = 0b0011 + // opcode = 0b01xx + VisitUnallocated(instr); + } else if (instr->ExtractBit(29) == 0) { + // U == 0 + if (instr->ExtractBits(14, 11) == 0x2) { + // opcode = 0b0010 + VisitNEON3SameExtra(instr); + } else { + VisitUnallocated(instr); + } + } else { + // U == 1 + if ((instr->ExtractBits(14, 11) == 0xd) || + (instr->ExtractBits(14, 11) == 0xf)) { + // opcode = 0b11x1 + VisitUnallocated(instr); + } else { + VisitNEON3SameExtra(instr); + } + } + } else { + if (instr->ExtractBit(10) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitNEON3Different(instr); + } else { + if (instr->ExtractBits(18, 17) == 0) { + if (instr->ExtractBit(20) == 0) { + if (instr->ExtractBit(19) == 0) { + VisitNEON2RegMisc(instr); + } else { + if (instr->ExtractBits(30, 29) == 0x2) { + VisitCryptoAES(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + if (instr->ExtractBit(19) == 0) { + VisitNEONAcrossLanes(instr); + } else { + if (instr->ExtractBit(22) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->ExtractBits(16, 15) == 0x0) || + 
(instr->ExtractBits(16, 14) == 0x2) || + (instr->ExtractBits(16, 15) == 0x2) || + (instr->ExtractBits(16, 12) == 0x1e) || + ((instr->ExtractBit(23) == 0) && + ((instr->ExtractBits(16, 14) == 0x3) || + (instr->ExtractBits(16, 12) == 0x1f))) || + ((instr->ExtractBit(23) == 1) && + (instr->ExtractBits(16, 12) == 0x1c))) { + VisitUnallocated(instr); + } else { + VisitNEON2RegMiscFP16(instr); + } + } + } + } + } else { + VisitUnallocated(instr); + } + } + } else { + VisitNEON3Same(instr); + } + } + } else { + if (instr->ExtractBit(10) == 0) { + VisitNEONByIndexedElement(instr); + } else { + if (instr->ExtractBit(23) == 0) { + if (instr->ExtractBits(22, 19) == 0) { + VisitNEONModifiedImmediate(instr); + } else { + VisitNEONShiftImmediate(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } else { + VisitUnallocated(instr); + } +} + + +void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) { + VIXL_ASSERT(instr->ExtractBits(28, 25) == 0xF); + if (instr->ExtractBit(24) == 0) { + if (instr->ExtractBit(21) == 0) { + if (instr->ExtractBit(15) == 0) { + if (instr->ExtractBit(10) == 0) { + if (instr->ExtractBit(29) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitCrypto3RegSHA(instr); + } else { + VisitUnallocated(instr); + } + } else { + VisitUnallocated(instr); + } + } else { + if (instr->ExtractBits(23, 22) == 0) { + VisitNEONScalarCopy(instr); + } else { + if (instr->Mask(0x00404000) == 0x00400000) { + if ((instr->ExtractBits(13, 11) == 0x6) || + (instr->ExtractBits(13, 11) < 2) || + ((instr->Mask(0x20800000) == 0x00000000) && + ((instr->ExtractBits(13, 11) < 0x3) || + (instr->ExtractBits(13, 11) == 0x5))) || + ((instr->Mask(0x20800000) == 0x00800000) && + (instr->ExtractBits(13, 11) < 0x7)) || + ((instr->Mask(0x20800000) == 0x20000000) && + ((instr->ExtractBits(13, 11) < 0x4) || + (instr->ExtractBits(13, 11) == 0x7))) || + ((instr->Mask(0x20800000) == 0x20800000) && + (instr->ExtractBits(12, 11) == 0x3))) { + VisitUnallocated(instr); + } else 
{ + VisitNEONScalar3SameFP16(instr); + } + } else { + VisitUnallocated(instr); + } + } + } + } else { + if (instr->ExtractBit(29) == 0) { + VisitUnallocated(instr); + } else { + if (instr->ExtractBit(10) == 0) { + VisitUnallocated(instr); + } else { + VisitNEONScalar3SameExtra(instr); + } + } + } + } else { + if (instr->ExtractBit(10) == 0) { + if (instr->ExtractBit(11) == 0) { + VisitNEONScalar3Diff(instr); + } else { + if (instr->ExtractBits(18, 17) == 0) { + if (instr->ExtractBit(20) == 0) { + if (instr->ExtractBit(19) == 0) { + VisitNEONScalar2RegMisc(instr); + } else { + if (instr->ExtractBit(29) == 0) { + VisitCrypto2RegSHA(instr); + } else { + VisitUnallocated(instr); + } + } + } else { + if (instr->ExtractBit(19) == 0) { + VisitNEONScalarPairwise(instr); + } else { + if (instr->ExtractBit(22) == 0) { + VisitUnallocated(instr); + } else { + if ((instr->ExtractBits(16, 15) == 0x0) || + (instr->ExtractBits(16, 14) == 0x2) || + (instr->ExtractBits(16, 15) == 0x2) || + (instr->ExtractBits(16, 13) == 0xc) || + (instr->ExtractBits(16, 12) == 0x1e) || + ((instr->ExtractBit(23) == 0) && + ((instr->ExtractBits(16, 14) == 0x3) || + (instr->ExtractBits(16, 12) == 0x1f))) || + ((instr->ExtractBit(23) == 1) && + ((instr->ExtractBits(16, 12) == 0xf) || + (instr->ExtractBits(16, 12) == 0x1c) || + ((instr->ExtractBit(29) == 1) && + ((instr->ExtractBits(16, 12) == 0xe) || + (instr->ExtractBits(16, 12) == 0x1f)))))) { + VisitUnallocated(instr); + } else { + VisitNEONScalar2RegMiscFP16(instr); + } + } + } + } + } else { + VisitUnallocated(instr); + } + } + } else { + VisitNEONScalar3Same(instr); + } + } + } else { + if (instr->ExtractBit(10) == 0) { + VisitNEONScalarByIndexedElement(instr); + } else { + if (instr->ExtractBit(23) == 0) { + VisitNEONScalarShiftImmediate(instr); + } else { + VisitUnallocated(instr); + } + } + } +} + + +#define DEFINE_VISITOR_CALLERS(A) \ + void Decoder::Visit##A(const Instruction* instr) { \ + VIXL_ASSERT(((A##FMask == 0) && (A##Fixed == 0)) || \ 
+ (instr->Mask(A##FMask) == A##Fixed)); \ + std::list::iterator it; \ + for (it = visitors_.begin(); it != visitors_.end(); it++) { \ + (*it)->Visit##A(instr); \ + } \ + } +VISITOR_LIST(DEFINE_VISITOR_CALLERS) +#undef DEFINE_VISITOR_CALLERS +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.h new file mode 100644 index 00000000..94928bed --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/decoder-aarch64.h @@ -0,0 +1,294 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_DECODER_AARCH64_H_ +#define VIXL_AARCH64_DECODER_AARCH64_H_ + +#include + +#include "../globals-vixl.h" + +#include "instructions-aarch64.h" + + +// List macro containing all visitors needed by the decoder class. + +#define VISITOR_LIST_THAT_RETURN(V) \ + V(AddSubExtended) \ + V(AddSubImmediate) \ + V(AddSubShifted) \ + V(AddSubWithCarry) \ + V(AtomicMemory) \ + V(Bitfield) \ + V(CompareBranch) \ + V(ConditionalBranch) \ + V(ConditionalCompareImmediate) \ + V(ConditionalCompareRegister) \ + V(ConditionalSelect) \ + V(Crypto2RegSHA) \ + V(Crypto3RegSHA) \ + V(CryptoAES) \ + V(DataProcessing1Source) \ + V(DataProcessing2Source) \ + V(DataProcessing3Source) \ + V(Exception) \ + V(Extract) \ + V(EvaluateIntoFlags) \ + V(FPCompare) \ + V(FPConditionalCompare) \ + V(FPConditionalSelect) \ + V(FPDataProcessing1Source) \ + V(FPDataProcessing2Source) \ + V(FPDataProcessing3Source) \ + V(FPFixedPointConvert) \ + V(FPImmediate) \ + V(FPIntegerConvert) \ + V(LoadLiteral) \ + V(LoadStoreExclusive) \ + V(LoadStorePAC) \ + V(LoadStorePairNonTemporal) \ + V(LoadStorePairOffset) \ + V(LoadStorePairPostIndex) \ + V(LoadStorePairPreIndex) \ + V(LoadStorePostIndex) \ + V(LoadStorePreIndex) \ + V(LoadStoreRCpcUnscaledOffset) \ + V(LoadStoreRegisterOffset) \ + V(LoadStoreUnscaledOffset) \ + V(LoadStoreUnsignedOffset) \ + V(LogicalImmediate) \ + V(LogicalShifted) \ + V(MoveWideImmediate) \ + V(NEON2RegMisc) \ + V(NEON2RegMiscFP16) \ + 
V(NEON3Different) \ + V(NEON3Same) \ + V(NEON3SameExtra) \ + V(NEON3SameFP16) \ + V(NEONAcrossLanes) \ + V(NEONByIndexedElement) \ + V(NEONCopy) \ + V(NEONExtract) \ + V(NEONLoadStoreMultiStruct) \ + V(NEONLoadStoreMultiStructPostIndex) \ + V(NEONLoadStoreSingleStruct) \ + V(NEONLoadStoreSingleStructPostIndex) \ + V(NEONModifiedImmediate) \ + V(NEONPerm) \ + V(NEONScalar2RegMisc) \ + V(NEONScalar2RegMiscFP16) \ + V(NEONScalar3Diff) \ + V(NEONScalar3Same) \ + V(NEONScalar3SameExtra) \ + V(NEONScalar3SameFP16) \ + V(NEONScalarByIndexedElement) \ + V(NEONScalarCopy) \ + V(NEONScalarPairwise) \ + V(NEONScalarShiftImmediate) \ + V(NEONShiftImmediate) \ + V(NEONTable) \ + V(PCRelAddressing) \ + V(RotateRightIntoFlags) \ + V(System) \ + V(TestBranch) \ + V(UnconditionalBranch) \ + V(UnconditionalBranchToRegister) + +#define VISITOR_LIST_THAT_DONT_RETURN(V) \ + V(Unallocated) \ + V(Unimplemented) + +#define VISITOR_LIST(V) \ + VISITOR_LIST_THAT_RETURN(V) \ + VISITOR_LIST_THAT_DONT_RETURN(V) + +namespace vixl { +namespace aarch64 { + +// The Visitor interface. Disassembler and simulator (and other tools) +// must provide implementations for all of these functions. +class DecoderVisitor { + public: + enum VisitorConstness { kConstVisitor, kNonConstVisitor }; + explicit DecoderVisitor(VisitorConstness constness = kConstVisitor) + : constness_(constness) {} + + virtual ~DecoderVisitor() {} + +#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0; + VISITOR_LIST(DECLARE) +#undef DECLARE + + bool IsConstVisitor() const { return constness_ == kConstVisitor; } + Instruction* MutableInstruction(const Instruction* instr) { + VIXL_ASSERT(!IsConstVisitor()); + return const_cast(instr); + } + + private: + const VisitorConstness constness_; +}; + + +class Decoder { + public: + Decoder() {} + + // Top-level wrappers around the actual decoding function. 
+ void Decode(const Instruction* instr) { + std::list::iterator it; + for (it = visitors_.begin(); it != visitors_.end(); it++) { + VIXL_ASSERT((*it)->IsConstVisitor()); + } + DecodeInstruction(instr); + } + void Decode(Instruction* instr) { + DecodeInstruction(const_cast(instr)); + } + + // Decode all instructions from start (inclusive) to end (exclusive). + template + void Decode(T start, T end) { + for (T instr = start; instr < end; instr = instr->GetNextInstruction()) { + Decode(instr); + } + } + + // Register a new visitor class with the decoder. + // Decode() will call the corresponding visitor method from all registered + // visitor classes when decoding reaches the leaf node of the instruction + // decode tree. + // Visitors are called in order. + // A visitor can be registered multiple times. + // + // d.AppendVisitor(V1); + // d.AppendVisitor(V2); + // d.PrependVisitor(V2); + // d.AppendVisitor(V3); + // + // d.Decode(i); + // + // will call in order visitor methods in V2, V1, V2, V3. + void AppendVisitor(DecoderVisitor* visitor); + void PrependVisitor(DecoderVisitor* visitor); + // These helpers register `new_visitor` before or after the first instance of + // `registered_visiter` in the list. + // So if + // V1, V2, V1, V2 + // are registered in this order in the decoder, calls to + // d.InsertVisitorAfter(V3, V1); + // d.InsertVisitorBefore(V4, V2); + // will yield the order + // V1, V3, V4, V2, V1, V2 + // + // For more complex modifications of the order of registered visitors, one can + // directly access and modify the list of visitors via the `visitors()' + // accessor. + void InsertVisitorBefore(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor); + void InsertVisitorAfter(DecoderVisitor* new_visitor, + DecoderVisitor* registered_visitor); + + // Remove all instances of a previously registered visitor class from the list + // of visitors stored by the decoder. 
+ void RemoveVisitor(DecoderVisitor* visitor); + +#define DECLARE(A) void Visit##A(const Instruction* instr); + VISITOR_LIST(DECLARE) +#undef DECLARE + + + std::list* visitors() { return &visitors_; } + + private: + // Decodes an instruction and calls the visitor functions registered with the + // Decoder class. + void DecodeInstruction(const Instruction* instr); + + // Decode the PC relative addressing instruction, and call the corresponding + // visitors. + // On entry, instruction bits 27:24 = 0x0. + void DecodePCRelAddressing(const Instruction* instr); + + // Decode the add/subtract immediate instruction, and call the correspoding + // visitors. + // On entry, instruction bits 27:24 = 0x1. + void DecodeAddSubImmediate(const Instruction* instr); + + // Decode the branch, system command, and exception generation parts of + // the instruction tree, and call the corresponding visitors. + // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}. + void DecodeBranchSystemException(const Instruction* instr); + + // Decode the load and store parts of the instruction tree, and call + // the corresponding visitors. + // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}. + void DecodeLoadStore(const Instruction* instr); + + // Decode the logical immediate and move wide immediate parts of the + // instruction tree, and call the corresponding visitors. + // On entry, instruction bits 27:24 = 0x2. + void DecodeLogical(const Instruction* instr); + + // Decode the bitfield and extraction parts of the instruction tree, + // and call the corresponding visitors. + // On entry, instruction bits 27:24 = 0x3. + void DecodeBitfieldExtract(const Instruction* instr); + + // Decode the data processing parts of the instruction tree, and call the + // corresponding visitors. + // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}. 
+ void DecodeDataProcessing(const Instruction* instr); + + // Decode the floating point parts of the instruction tree, and call the + // corresponding visitors. + // On entry, instruction bits 27:24 = {0xE, 0xF}. + void DecodeFP(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) load/store part of the instruction tree, + // and call the corresponding visitors. + // On entry, instruction bits 29:25 = 0x6. + void DecodeNEONLoadStore(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) vector data processing part of the + // instruction tree, and call the corresponding visitors. + // On entry, instruction bits 28:25 = 0x7. + void DecodeNEONVectorDataProcessing(const Instruction* instr); + + // Decode the Advanced SIMD (NEON) scalar data processing part of the + // instruction tree, and call the corresponding visitors. + // On entry, instruction bits 28:25 = 0xF. + void DecodeNEONScalarDataProcessing(const Instruction* instr); + + private: + // Visitors are registered in a list. + std::list visitors_; +}; + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_DECODER_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.cc new file mode 100644 index 00000000..28fec6bc --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.cc @@ -0,0 +1,6035 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+#include
+#include
+
+#include "disasm-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+
+// Construct a disassembler that owns a 256-byte malloc-allocated text
+// buffer; own_buffer_ == true means the destructor frees it.
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+ code_address_offset_ = 0;
+}
+
+
+// Construct a disassembler that writes into a caller-provided buffer of
+// `buffer_size` bytes; the caller retains ownership (own_buffer_ == false).
+Disassembler::Disassembler(char *text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+ code_address_offset_ = 0;
+}
+
+
+// Free the text buffer only if this instance allocated it.
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+// Return the internal text buffer holding the most recent disassembly.
+char *Disassembler::GetOutput() { return buffer_; }
+
+
+// Disassemble an add/subtract-immediate instruction, preferring the
+// canonical alias where one applies: mov (add with zero immediate where Rd
+// or Rn is SP/ZR), cmn (adds with ZR destination), cmp (subs with ZR
+// destination).
+void Disassembler::VisitAddSubImmediate(const Instruction *instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool stack_op =
+ (rd_is_zr || RnIsZROrSP(instr)) && (instr->GetImmAddSub() == 0) ? true
+ : false;
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm:
+ mnemonic = "sub";
+ break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an add/subtract-shifted-register instruction, preferring
+// aliases: cmn/cmp (flag-setting forms with ZR destination) and neg/negs
+// (sub/subs with ZR first source operand).
+void Disassembler::VisitAddSubShifted(const Instruction *instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'NDP";
+ const char *form_cmp = "'Rn, 'Rm'NDP";
+ const char *form_neg = "'Rd, 'Rm'NDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift:
+ mnemonic = "add";
+ break;
+ case 
ADDS_w_shift: + case ADDS_x_shift: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_shift: + case SUB_x_shift: { + mnemonic = "sub"; + if (rn_is_zr) { + mnemonic = "neg"; + form = form_neg; + } + break; + } + case SUBS_w_shift: + case SUBS_x_shift: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } else if (rn_is_zr) { + mnemonic = "negs"; + form = form_neg; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubExtended(const Instruction *instr) { + bool rd_is_zr = RdIsZROrSP(instr); + const char *mnemonic = ""; + Extend mode = static_cast(instr->GetExtendMode()); + const char *form = ((mode == UXTX) || (mode == SXTX)) ? "'Rds, 'Rns, 'Xm'Ext" + : "'Rds, 'Rns, 'Wm'Ext"; + const char *form_cmp = + ((mode == UXTX) || (mode == SXTX)) ? "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext"; + + switch (instr->Mask(AddSubExtendedMask)) { + case ADD_w_ext: + case ADD_x_ext: + mnemonic = "add"; + break; + case ADDS_w_ext: + case ADDS_x_ext: { + mnemonic = "adds"; + if (rd_is_zr) { + mnemonic = "cmn"; + form = form_cmp; + } + break; + } + case SUB_w_ext: + case SUB_x_ext: + mnemonic = "sub"; + break; + case SUBS_w_ext: + case SUBS_x_ext: { + mnemonic = "subs"; + if (rd_is_zr) { + mnemonic = "cmp"; + form = form_cmp; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitAddSubWithCarry(const Instruction *instr) { + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm"; + const char *form_neg = "'Rd, 'Rm"; + + switch (instr->Mask(AddSubWithCarryMask)) { + case ADC_w: + case ADC_x: + mnemonic = "adc"; + break; + case ADCS_w: + case ADCS_x: + mnemonic = "adcs"; + break; + case SBC_w: + case SBC_x: { + mnemonic = "sbc"; + if (rn_is_zr) { + mnemonic = "ngc"; + form = form_neg; + } + break; + } + case SBCS_w: + case SBCS_x: { + 
mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a rotate-right-into-flags instruction (only RMIF is
+// allocated here).
+void Disassembler::VisitRotateRightIntoFlags(const Instruction *instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(RotateRightIntoFlags)";
+
+ switch (instr->Mask(RotateRightIntoFlagsMask)) {
+ case RMIF:
+ mnemonic = "rmif";
+ form = "'Xn, 'IRr, 'INzcv";
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+// Disassemble an evaluate-into-flags instruction (SETF8 or SETF16).
+void Disassembler::VisitEvaluateIntoFlags(const Instruction *instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(EvaluateIntoFlags)";
+
+ switch (instr->Mask(EvaluateIntoFlagsMask)) {
+ case SETF8:
+ mnemonic = "setf8";
+ form = "'Wn";
+ break;
+ case SETF16:
+ mnemonic = "setf16";
+ form = "'Wn";
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+// Disassemble a logical-immediate instruction. An immediate that cannot be
+// decoded (GetImmLogical() == 0) is printed as unallocated rather than
+// asserted on.
+void Disassembler::VisitLogicalImmediate(const Instruction *instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->GetImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm:
+ mnemonic = "and";
+ break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size =
+ (instr->GetSixtyFourBits() == 1) ? 
kXRegSize : kWRegSize; + if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->GetImmLogical())) { + mnemonic = "mov"; + form = "'Rds, 'ITri"; + } + break; + } + case EOR_w_imm: + case EOR_x_imm: + mnemonic = "eor"; + break; + case ANDS_w_imm: + case ANDS_x_imm: { + mnemonic = "ands"; + if (rd_is_zr) { + mnemonic = "tst"; + form = "'Rn, 'ITri"; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) { + VIXL_ASSERT((reg_size == kXRegSize) || + ((reg_size == kWRegSize) && (value <= 0xffffffff))); + + // Test for movz: 16 bits set at positions 0, 16, 32 or 48. + if (((value & UINT64_C(0xffffffffffff0000)) == 0) || + ((value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((value & UINT64_C(0x0000ffffffffffff)) == 0)) { + return true; + } + + // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48). + if ((reg_size == kXRegSize) && + (((~value & UINT64_C(0xffffffffffff0000)) == 0) || + ((~value & UINT64_C(0xffffffff0000ffff)) == 0) || + ((~value & UINT64_C(0xffff0000ffffffff)) == 0) || + ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) { + return true; + } + if ((reg_size == kWRegSize) && (((value & 0xffff0000) == 0xffff0000) || + ((value & 0x0000ffff) == 0x0000ffff))) { + return true; + } + return false; +} + + +void Disassembler::VisitLogicalShifted(const Instruction *instr) { + bool rd_is_zr = RdIsZROrSP(instr); + bool rn_is_zr = RnIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm'NLo"; + + switch (instr->Mask(LogicalShiftedMask)) { + case AND_w: + case AND_x: + mnemonic = "and"; + break; + case BIC_w: + case BIC_x: + mnemonic = "bic"; + break; + case EOR_w: + case EOR_x: + mnemonic = "eor"; + break; + case EON_w: + case EON_x: + mnemonic = "eon"; + break; + case BICS_w: + case BICS_x: + mnemonic = "bics"; + break; + case ANDS_w: + case ANDS_x: { + mnemonic = "ands"; + if (rd_is_zr) 
{ + mnemonic = "tst"; + form = "'Rn, 'Rm'NLo"; + } + break; + } + case ORR_w: + case ORR_x: { + mnemonic = "orr"; + if (rn_is_zr && (instr->GetImmDPShift() == 0) && + (instr->GetShiftDP() == LSL)) { + mnemonic = "mov"; + form = "'Rd, 'Rm"; + } + break; + } + case ORN_w: + case ORN_x: { + mnemonic = "orn"; + if (rn_is_zr) { + mnemonic = "mvn"; + form = "'Rd, 'Rm'NLo"; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareRegister(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'Rm, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareRegisterMask)) { + case CCMN_w: + case CCMN_x: + mnemonic = "ccmn"; + break; + case CCMP_w: + case CCMP_x: + mnemonic = "ccmp"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalCompareImmediate(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rn, 'IP, 'INzcv, 'Cond"; + + switch (instr->Mask(ConditionalCompareImmediateMask)) { + case CCMN_w_imm: + case CCMN_x_imm: + mnemonic = "ccmn"; + break; + case CCMP_w_imm: + case CCMP_x_imm: + mnemonic = "ccmp"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitConditionalSelect(const Instruction *instr) { + bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr)); + bool rn_is_rm = (instr->GetRn() == instr->GetRm()); + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'Cond"; + const char *form_test = "'Rd, 'CInv"; + const char *form_update = "'Rd, 'Rn, 'CInv"; + + Condition cond = static_cast(instr->GetCondition()); + bool invertible_cond = (cond != al) && (cond != nv); + + switch (instr->Mask(ConditionalSelectMask)) { + case CSEL_w: + case CSEL_x: + mnemonic = "csel"; + break; + case CSINC_w: + case CSINC_x: { + mnemonic = "csinc"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "cset"; + 
form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinc"; + form = form_update; + } + break; + } + case CSINV_w: + case CSINV_x: { + mnemonic = "csinv"; + if (rnm_is_zr && invertible_cond) { + mnemonic = "csetm"; + form = form_test; + } else if (rn_is_rm && invertible_cond) { + mnemonic = "cinv"; + form = form_update; + } + break; + } + case CSNEG_w: + case CSNEG_x: { + mnemonic = "csneg"; + if (rn_is_rm && invertible_cond) { + mnemonic = "cneg"; + form = form_update; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitBitfield(const Instruction *instr) { + unsigned s = instr->GetImmS(); + unsigned r = instr->GetImmR(); + unsigned rd_size_minus_1 = + ((instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1; + const char *mnemonic = ""; + const char *form = ""; + const char *form_shift_right = "'Rd, 'Rn, 'IBr"; + const char *form_extend = "'Rd, 'Wn"; + const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1"; + const char *form_bfc = "'Rd, 'IBZ-r, 'IBs+1"; + const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1"; + const char *form_lsl = "'Rd, 'Rn, 'IBZ-r"; + + switch (instr->Mask(BitfieldMask)) { + case SBFM_w: + case SBFM_x: { + mnemonic = "sbfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "sxtb"; + } else if (s == 15) { + mnemonic = "sxth"; + } else if ((s == 31) && (instr->GetSixtyFourBits() == 1)) { + mnemonic = "sxtw"; + } else { + form = form_bfx; + } + } else if (s == rd_size_minus_1) { + mnemonic = "asr"; + form = form_shift_right; + } else if (s < r) { + mnemonic = "sbfiz"; + form = form_bfiz; + } + break; + } + case UBFM_w: + case UBFM_x: { + mnemonic = "ubfx"; + form = form_bfx; + if (r == 0) { + form = form_extend; + if (s == 7) { + mnemonic = "uxtb"; + } else if (s == 15) { + mnemonic = "uxth"; + } else { + form = form_bfx; + } + } + if (s == rd_size_minus_1) { + mnemonic = "lsr"; + form = form_shift_right; + } else if (r == 
s + 1) { + mnemonic = "lsl"; + form = form_lsl; + } else if (s < r) { + mnemonic = "ubfiz"; + form = form_bfiz; + } + break; + } + case BFM_w: + case BFM_x: { + mnemonic = "bfxil"; + form = form_bfx; + if (s < r) { + if (instr->GetRn() == kZeroRegCode) { + mnemonic = "bfc"; + form = form_bfc; + } else { + mnemonic = "bfi"; + form = form_bfiz; + } + } + } + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitExtract(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn, 'Rm, 'IExtract"; + + switch (instr->Mask(ExtractMask)) { + case EXTR_w: + case EXTR_x: { + if (instr->GetRn() == instr->GetRm()) { + mnemonic = "ror"; + form = "'Rd, 'Rn, 'IExtract"; + } else { + mnemonic = "extr"; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitPCRelAddressing(const Instruction *instr) { + switch (instr->Mask(PCRelAddressingMask)) { + case ADR: + Format(instr, "adr", "'Xd, 'AddrPCRelByte"); + break; + case ADRP: + Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); + break; + default: + Format(instr, "unimplemented", "(PCRelAddressing)"); + } +} + + +void Disassembler::VisitConditionalBranch(const Instruction *instr) { + switch (instr->Mask(ConditionalBranchMask)) { + case B_cond: + Format(instr, "b.'CBrn", "'TImmCond"); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Disassembler::VisitUnconditionalBranchToRegister( + const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BR: + mnemonic = "br"; + form = "'Xn"; + break; + case BLR: + mnemonic = "blr"; + form = "'Xn"; + break; + case RET: { + mnemonic = "ret"; + if (instr->GetRn() == kLinkRegCode) { + form = NULL; + } else { + form = "'Xn"; + } + break; + } + case BRAAZ: + mnemonic = "braaz"; + form = "'Xn"; + break; + case BRABZ: + mnemonic = "brabz"; + form = "'Xn"; + break; + case BLRAAZ: + mnemonic = 
"blraaz"; + form = "'Xn"; + break; + case BLRABZ: + mnemonic = "blrabz"; + form = "'Xn"; + break; + case RETAA: + mnemonic = "retaa"; + form = NULL; + break; + case RETAB: + mnemonic = "retab"; + form = NULL; + break; + case BRAA: + mnemonic = "braa"; + form = "'Xn, 'Xds"; + break; + case BRAB: + mnemonic = "brab"; + form = "'Xn, 'Xds"; + break; + case BLRAA: + mnemonic = "blraa"; + form = "'Xn, 'Xds"; + break; + case BLRAB: + mnemonic = "blrab"; + form = "'Xn, 'Xds"; + break; + default: + form = "(UnconditionalBranchToRegister)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitUnconditionalBranch(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'TImmUncn"; + + switch (instr->Mask(UnconditionalBranchMask)) { + case B: + mnemonic = "b"; + break; + case BL: + mnemonic = "bl"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing1Source(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Rn"; + + switch (instr->Mask(DataProcessing1SourceMask)) { +#define FORMAT(A, B) \ + case A##_w: \ + case A##_x: \ + mnemonic = B; \ + break; + FORMAT(RBIT, "rbit"); + FORMAT(REV16, "rev16"); + FORMAT(REV, "rev"); + FORMAT(CLZ, "clz"); + FORMAT(CLS, "cls"); +#undef FORMAT + +#define PAUTH_VARIATIONS(V) \ + V(PACI, "paci") \ + V(PACD, "pacd") \ + V(AUTI, "auti") \ + V(AUTD, "autd") +#define PAUTH_CASE(NAME, MN) \ + case NAME##A: \ + mnemonic = MN "a"; \ + form = "'Xd, 'Xns"; \ + break; \ + case NAME##ZA: \ + mnemonic = MN "za"; \ + form = "'Xd"; \ + break; \ + case NAME##B: \ + mnemonic = MN "b"; \ + form = "'Xd, 'Xns"; \ + break; \ + case NAME##ZB: \ + mnemonic = MN "zb"; \ + form = "'Xd"; \ + break; + + PAUTH_VARIATIONS(PAUTH_CASE) +#undef PAUTH_CASE + + case XPACI: + mnemonic = "xpaci"; + form = "'Xd"; + break; + case XPACD: + mnemonic = "xpacd"; + form = "'Xd"; + break; + case REV32_x: + mnemonic = "rev32"; + break; + default: + 
VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing2Source(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Rd, 'Rn, 'Rm"; + const char *form_wwx = "'Wd, 'Wn, 'Xm"; + + switch (instr->Mask(DataProcessing2SourceMask)) { +#define FORMAT(A, B) \ + case A##_w: \ + case A##_x: \ + mnemonic = B; \ + break; + FORMAT(UDIV, "udiv"); + FORMAT(SDIV, "sdiv"); + FORMAT(LSLV, "lsl"); + FORMAT(LSRV, "lsr"); + FORMAT(ASRV, "asr"); + FORMAT(RORV, "ror"); +#undef FORMAT + case PACGA: + mnemonic = "pacga"; + form = "'Xd, 'Xn, 'Xms"; + break; + case CRC32B: + mnemonic = "crc32b"; + break; + case CRC32H: + mnemonic = "crc32h"; + break; + case CRC32W: + mnemonic = "crc32w"; + break; + case CRC32X: + mnemonic = "crc32x"; + form = form_wwx; + break; + case CRC32CB: + mnemonic = "crc32cb"; + break; + case CRC32CH: + mnemonic = "crc32ch"; + break; + case CRC32CW: + mnemonic = "crc32cw"; + break; + case CRC32CX: + mnemonic = "crc32cx"; + form = form_wwx; + break; + default: + form = "(DataProcessing2Source)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitDataProcessing3Source(const Instruction *instr) { + bool ra_is_zr = RaIsZROrSP(instr); + const char *mnemonic = ""; + const char *form = "'Xd, 'Wn, 'Wm, 'Xa"; + const char *form_rrr = "'Rd, 'Rn, 'Rm"; + const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra"; + const char *form_xww = "'Xd, 'Wn, 'Wm"; + const char *form_xxx = "'Xd, 'Xn, 'Xm"; + + switch (instr->Mask(DataProcessing3SourceMask)) { + case MADD_w: + case MADD_x: { + mnemonic = "madd"; + form = form_rrrr; + if (ra_is_zr) { + mnemonic = "mul"; + form = form_rrr; + } + break; + } + case MSUB_w: + case MSUB_x: { + mnemonic = "msub"; + form = form_rrrr; + if (ra_is_zr) { + mnemonic = "mneg"; + form = form_rrr; + } + break; + } + case SMADDL_x: { + mnemonic = "smaddl"; + if (ra_is_zr) { + mnemonic = "smull"; + form = form_xww; + } + break; + } + case SMSUBL_x: { + mnemonic = 
"smsubl"; + if (ra_is_zr) { + mnemonic = "smnegl"; + form = form_xww; + } + break; + } + case UMADDL_x: { + mnemonic = "umaddl"; + if (ra_is_zr) { + mnemonic = "umull"; + form = form_xww; + } + break; + } + case UMSUBL_x: { + mnemonic = "umsubl"; + if (ra_is_zr) { + mnemonic = "umnegl"; + form = form_xww; + } + break; + } + case SMULH_x: { + mnemonic = "smulh"; + form = form_xxx; + break; + } + case UMULH_x: { + mnemonic = "umulh"; + form = form_xxx; + break; + } + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitCompareBranch(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rt, 'TImmCmpa"; + + switch (instr->Mask(CompareBranchMask)) { + case CBZ_w: + case CBZ_x: + mnemonic = "cbz"; + break; + case CBNZ_w: + case CBNZ_x: + mnemonic = "cbnz"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitTestBranch(const Instruction *instr) { + const char *mnemonic = ""; + // If the top bit of the immediate is clear, the tested register is + // disassembled as Wt, otherwise Xt. As the top bit of the immediate is + // encoded in bit 31 of the instruction, we can reuse the Rt form, which + // uses bit 31 (normally "sf") to choose the register size. + const char *form = "'Rt, 'IS, 'TImmTest"; + + switch (instr->Mask(TestBranchMask)) { + case TBZ: + mnemonic = "tbz"; + break; + case TBNZ: + mnemonic = "tbnz"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitMoveWideImmediate(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'IMoveImm"; + + // Print the shift separately for movk, to make it clear which half word will + // be overwritten. Movn and movz print the computed immediate, which includes + // shift calculation. 
+ switch (instr->Mask(MoveWideImmediateMask)) { + case MOVN_w: + case MOVN_x: + if ((instr->GetImmMoveWide()) || (instr->GetShiftMoveWide() == 0)) { + if ((instr->GetSixtyFourBits() == 0) && + (instr->GetImmMoveWide() == 0xffff)) { + mnemonic = "movn"; + } else { + mnemonic = "mov"; + form = "'Rd, 'IMoveNeg"; + } + } else { + mnemonic = "movn"; + } + break; + case MOVZ_w: + case MOVZ_x: + if ((instr->GetImmMoveWide()) || (instr->GetShiftMoveWide() == 0)) + mnemonic = "mov"; + else + mnemonic = "movz"; + break; + case MOVK_w: + case MOVK_x: + mnemonic = "movk"; + form = "'Rd, 'IMoveLSL"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_LIST(V) \ + V(STRB_w, "strb", "'Wt") \ + V(STRH_w, "strh", "'Wt") \ + V(STR_w, "str", "'Wt") \ + V(STR_x, "str", "'Xt") \ + V(LDRB_w, "ldrb", "'Wt") \ + V(LDRH_w, "ldrh", "'Wt") \ + V(LDR_w, "ldr", "'Wt") \ + V(LDR_x, "ldr", "'Xt") \ + V(LDRSB_x, "ldrsb", "'Xt") \ + V(LDRSH_x, "ldrsh", "'Xt") \ + V(LDRSW_x, "ldrsw", "'Xt") \ + V(LDRSB_w, "ldrsb", "'Wt") \ + V(LDRSH_w, "ldrsh", "'Wt") \ + V(STR_b, "str", "'Bt") \ + V(STR_h, "str", "'Ht") \ + V(STR_s, "str", "'St") \ + V(STR_d, "str", "'Dt") \ + V(LDR_b, "ldr", "'Bt") \ + V(LDR_h, "ldr", "'Ht") \ + V(LDR_s, "ldr", "'St") \ + V(LDR_d, "ldr", "'Dt") \ + V(STR_q, "str", "'Qt") \ + V(LDR_q, "ldr", "'Qt") + +void Disassembler::VisitLoadStorePreIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePreIndex)"; + + switch (instr->Mask(LoadStorePreIndexMask)) { +#define LS_PREINDEX(A, B, C) \ + case A##_pre: \ + mnemonic = B; \ + form = C ", ['Xns'ILSi]!"; \ + break; + LOAD_STORE_LIST(LS_PREINDEX) +#undef LS_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePostIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePostIndex)"; + + switch (instr->Mask(LoadStorePostIndexMask)) { +#define 
LS_POSTINDEX(A, B, C) \ + case A##_post: \ + mnemonic = B; \ + form = C ", ['Xns]'ILSi"; \ + break; + LOAD_STORE_LIST(LS_POSTINDEX) +#undef LS_POSTINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreUnsignedOffset)"; + + switch (instr->Mask(LoadStoreUnsignedOffsetMask)) { +#define LS_UNSIGNEDOFFSET(A, B, C) \ + case A##_unsigned: \ + mnemonic = B; \ + form = C ", ['Xns'ILU]"; \ + break; + LOAD_STORE_LIST(LS_UNSIGNEDOFFSET) +#undef LS_UNSIGNEDOFFSET + case PRFM_unsigned: + mnemonic = "prfm"; + form = "'PrefOp, ['Xns'ILU]"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreRCpcUnscaledOffset(const Instruction *instr) { + const char *mnemonic; + const char *form = "'Wt, ['Xns'ILS]"; + const char *form_x = "'Xt, ['Xns'ILS]"; + + switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) { + case STLURB: + mnemonic = "stlurb"; + break; + case LDAPURB: + mnemonic = "ldapurb"; + break; + case LDAPURSB_w: + mnemonic = "ldapursb"; + break; + case LDAPURSB_x: + mnemonic = "ldapursb"; + form = form_x; + break; + case STLURH: + mnemonic = "stlurh"; + break; + case LDAPURH: + mnemonic = "ldapurh"; + break; + case LDAPURSH_w: + mnemonic = "ldapursh"; + break; + case LDAPURSH_x: + mnemonic = "ldapursh"; + form = form_x; + break; + case STLUR_w: + mnemonic = "stlur"; + break; + case LDAPUR_w: + mnemonic = "ldapur"; + break; + case LDAPURSW: + mnemonic = "ldapursw"; + form = form_x; + break; + case STLUR_x: + mnemonic = "stlur"; + form = form_x; + break; + case LDAPUR_x: + mnemonic = "ldapur"; + form = form_x; + break; + default: + mnemonic = "unimplemented"; + form = "(LoadStoreRCpcUnscaledOffset)"; + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreRegisterOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStoreRegisterOffset)"; + 
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) { +#define LS_REGISTEROFFSET(A, B, C) \ + case A##_reg: \ + mnemonic = B; \ + form = C ", ['Xns, 'Offsetreg]"; \ + break; + LOAD_STORE_LIST(LS_REGISTEROFFSET) +#undef LS_REGISTEROFFSET + case PRFM_reg: + mnemonic = "prfm"; + form = "'PrefOp, ['Xns, 'Offsetreg]"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Wt, ['Xns'ILS]"; + const char *form_x = "'Xt, ['Xns'ILS]"; + const char *form_b = "'Bt, ['Xns'ILS]"; + const char *form_h = "'Ht, ['Xns'ILS]"; + const char *form_s = "'St, ['Xns'ILS]"; + const char *form_d = "'Dt, ['Xns'ILS]"; + const char *form_q = "'Qt, ['Xns'ILS]"; + const char *form_prefetch = "'PrefOp, ['Xns'ILS]"; + + switch (instr->Mask(LoadStoreUnscaledOffsetMask)) { + case STURB_w: + mnemonic = "sturb"; + break; + case STURH_w: + mnemonic = "sturh"; + break; + case STUR_w: + mnemonic = "stur"; + break; + case STUR_x: + mnemonic = "stur"; + form = form_x; + break; + case STUR_b: + mnemonic = "stur"; + form = form_b; + break; + case STUR_h: + mnemonic = "stur"; + form = form_h; + break; + case STUR_s: + mnemonic = "stur"; + form = form_s; + break; + case STUR_d: + mnemonic = "stur"; + form = form_d; + break; + case STUR_q: + mnemonic = "stur"; + form = form_q; + break; + case LDURB_w: + mnemonic = "ldurb"; + break; + case LDURH_w: + mnemonic = "ldurh"; + break; + case LDUR_w: + mnemonic = "ldur"; + break; + case LDUR_x: + mnemonic = "ldur"; + form = form_x; + break; + case LDUR_b: + mnemonic = "ldur"; + form = form_b; + break; + case LDUR_h: + mnemonic = "ldur"; + form = form_h; + break; + case LDUR_s: + mnemonic = "ldur"; + form = form_s; + break; + case LDUR_d: + mnemonic = "ldur"; + form = form_d; + break; + case LDUR_q: + mnemonic = "ldur"; + form = form_q; + break; + case LDURSB_x: + form = form_x; + VIXL_FALLTHROUGH(); + case LDURSB_w: + mnemonic = "ldursb"; + 
break; + case LDURSH_x: + form = form_x; + VIXL_FALLTHROUGH(); + case LDURSH_w: + mnemonic = "ldursh"; + break; + case LDURSW_x: + mnemonic = "ldursw"; + form = form_x; + break; + case PRFUM: + mnemonic = "prfum"; + form = form_prefetch; + break; + default: + form = "(LoadStoreUnscaledOffset)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadLiteral(const Instruction *instr) { + const char *mnemonic = "ldr"; + const char *form = "(LoadLiteral)"; + + switch (instr->Mask(LoadLiteralMask)) { + case LDR_w_lit: + form = "'Wt, 'ILLiteral 'LValue"; + break; + case LDR_x_lit: + form = "'Xt, 'ILLiteral 'LValue"; + break; + case LDR_s_lit: + form = "'St, 'ILLiteral 'LValue"; + break; + case LDR_d_lit: + form = "'Dt, 'ILLiteral 'LValue"; + break; + case LDR_q_lit: + form = "'Qt, 'ILLiteral 'LValue"; + break; + case LDRSW_x_lit: { + mnemonic = "ldrsw"; + form = "'Xt, 'ILLiteral 'LValue"; + break; + } + case PRFM_lit: { + mnemonic = "prfm"; + form = "'PrefOp, 'ILLiteral 'LValue"; + break; + } + default: + mnemonic = "unimplemented"; + } + Format(instr, mnemonic, form); +} + + +#define LOAD_STORE_PAIR_LIST(V) \ + V(STP_w, "stp", "'Wt, 'Wt2", "2") \ + V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \ + V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \ + V(STP_x, "stp", "'Xt, 'Xt2", "3") \ + V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \ + V(STP_s, "stp", "'St, 'St2", "2") \ + V(LDP_s, "ldp", "'St, 'St2", "2") \ + V(STP_d, "stp", "'Dt, 'Dt2", "3") \ + V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \ + V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \ + V(STP_q, "stp", "'Qt, 'Qt2", "4") + +void Disassembler::VisitLoadStorePairPostIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPostIndex)"; + + switch (instr->Mask(LoadStorePairPostIndexMask)) { +#define LSP_POSTINDEX(A, B, C, D) \ + case A##_post: \ + mnemonic = B; \ + form = C ", ['Xns]'ILP" D "i"; \ + break; + LOAD_STORE_PAIR_LIST(LSP_POSTINDEX) +#undef LSP_POSTINDEX + } + Format(instr, mnemonic, form); 
+} + + +void Disassembler::VisitLoadStorePairPreIndex(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairPreIndex)"; + + switch (instr->Mask(LoadStorePairPreIndexMask)) { +#define LSP_PREINDEX(A, B, C, D) \ + case A##_pre: \ + mnemonic = B; \ + form = C ", ['Xns'ILP" D "i]!"; \ + break; + LOAD_STORE_PAIR_LIST(LSP_PREINDEX) +#undef LSP_PREINDEX + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairOffset(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePairOffset)"; + + switch (instr->Mask(LoadStorePairOffsetMask)) { +#define LSP_OFFSET(A, B, C, D) \ + case A##_off: \ + mnemonic = B; \ + form = C ", ['Xns'ILP" D "]"; \ + break; + LOAD_STORE_PAIR_LIST(LSP_OFFSET) +#undef LSP_OFFSET + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitLoadStorePairNonTemporal(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStorePairNonTemporalMask)) { + case STNP_w: + mnemonic = "stnp"; + form = "'Wt, 'Wt2, ['Xns'ILP2]"; + break; + case LDNP_w: + mnemonic = "ldnp"; + form = "'Wt, 'Wt2, ['Xns'ILP2]"; + break; + case STNP_x: + mnemonic = "stnp"; + form = "'Xt, 'Xt2, ['Xns'ILP3]"; + break; + case LDNP_x: + mnemonic = "ldnp"; + form = "'Xt, 'Xt2, ['Xns'ILP3]"; + break; + case STNP_s: + mnemonic = "stnp"; + form = "'St, 'St2, ['Xns'ILP2]"; + break; + case LDNP_s: + mnemonic = "ldnp"; + form = "'St, 'St2, ['Xns'ILP2]"; + break; + case STNP_d: + mnemonic = "stnp"; + form = "'Dt, 'Dt2, ['Xns'ILP3]"; + break; + case LDNP_d: + mnemonic = "ldnp"; + form = "'Dt, 'Dt2, ['Xns'ILP3]"; + break; + case STNP_q: + mnemonic = "stnp"; + form = "'Qt, 'Qt2, ['Xns'ILP4]"; + break; + case LDNP_q: + mnemonic = "ldnp"; + form = "'Qt, 'Qt2, ['Xns'ILP4]"; + break; + default: + form = "(LoadStorePairNonTemporal)"; + } + Format(instr, mnemonic, form); +} + +// clang-format off +#define 
LOAD_STORE_EXCLUSIVE_LIST(V) \ + V(STXRB_w, "stxrb", "'Ws, 'Wt") \ + V(STXRH_w, "stxrh", "'Ws, 'Wt") \ + V(STXR_w, "stxr", "'Ws, 'Wt") \ + V(STXR_x, "stxr", "'Ws, 'Xt") \ + V(LDXRB_w, "ldxrb", "'Wt") \ + V(LDXRH_w, "ldxrh", "'Wt") \ + V(LDXR_w, "ldxr", "'Wt") \ + V(LDXR_x, "ldxr", "'Xt") \ + V(STXP_w, "stxp", "'Ws, 'Wt, 'Wt2") \ + V(STXP_x, "stxp", "'Ws, 'Xt, 'Xt2") \ + V(LDXP_w, "ldxp", "'Wt, 'Wt2") \ + V(LDXP_x, "ldxp", "'Xt, 'Xt2") \ + V(STLXRB_w, "stlxrb", "'Ws, 'Wt") \ + V(STLXRH_w, "stlxrh", "'Ws, 'Wt") \ + V(STLXR_w, "stlxr", "'Ws, 'Wt") \ + V(STLXR_x, "stlxr", "'Ws, 'Xt") \ + V(LDAXRB_w, "ldaxrb", "'Wt") \ + V(LDAXRH_w, "ldaxrh", "'Wt") \ + V(LDAXR_w, "ldaxr", "'Wt") \ + V(LDAXR_x, "ldaxr", "'Xt") \ + V(STLXP_w, "stlxp", "'Ws, 'Wt, 'Wt2") \ + V(STLXP_x, "stlxp", "'Ws, 'Xt, 'Xt2") \ + V(LDAXP_w, "ldaxp", "'Wt, 'Wt2") \ + V(LDAXP_x, "ldaxp", "'Xt, 'Xt2") \ + V(STLRB_w, "stlrb", "'Wt") \ + V(STLRH_w, "stlrh", "'Wt") \ + V(STLR_w, "stlr", "'Wt") \ + V(STLR_x, "stlr", "'Xt") \ + V(LDARB_w, "ldarb", "'Wt") \ + V(LDARH_w, "ldarh", "'Wt") \ + V(LDAR_w, "ldar", "'Wt") \ + V(LDAR_x, "ldar", "'Xt") \ + V(STLLRB, "stllrb", "'Wt") \ + V(STLLRH, "stllrh", "'Wt") \ + V(STLLR_w, "stllr", "'Wt") \ + V(STLLR_x, "stllr", "'Xt") \ + V(LDLARB, "ldlarb", "'Wt") \ + V(LDLARH, "ldlarh", "'Wt") \ + V(LDLAR_w, "ldlar", "'Wt") \ + V(LDLAR_x, "ldlar", "'Xt") \ + V(CAS_w, "cas", "'Ws, 'Wt") \ + V(CAS_x, "cas", "'Xs, 'Xt") \ + V(CASA_w, "casa", "'Ws, 'Wt") \ + V(CASA_x, "casa", "'Xs, 'Xt") \ + V(CASL_w, "casl", "'Ws, 'Wt") \ + V(CASL_x, "casl", "'Xs, 'Xt") \ + V(CASAL_w, "casal", "'Ws, 'Wt") \ + V(CASAL_x, "casal", "'Xs, 'Xt") \ + V(CASB, "casb", "'Ws, 'Wt") \ + V(CASAB, "casab", "'Ws, 'Wt") \ + V(CASLB, "caslb", "'Ws, 'Wt") \ + V(CASALB, "casalb", "'Ws, 'Wt") \ + V(CASH, "cash", "'Ws, 'Wt") \ + V(CASAH, "casah", "'Ws, 'Wt") \ + V(CASLH, "caslh", "'Ws, 'Wt") \ + V(CASALH, "casalh", "'Ws, 'Wt") \ + V(CASP_w, "casp", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASP_x, "casp", "'Xs, 'X(s+1), 'Xt, 
'X(t+1)") \ + V(CASPA_w, "caspa", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASPA_x, "caspa", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \ + V(CASPL_w, "caspl", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASPL_x, "caspl", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") \ + V(CASPAL_w, "caspal", "'Ws, 'W(s+1), 'Wt, 'W(t+1)") \ + V(CASPAL_x, "caspal", "'Xs, 'X(s+1), 'Xt, 'X(t+1)") +// clang-format on + + +void Disassembler::VisitLoadStoreExclusive(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form; + + switch (instr->Mask(LoadStoreExclusiveMask)) { +#define LSX(A, B, C) \ + case A: \ + mnemonic = B; \ + form = C ", ['Xns]"; \ + break; + LOAD_STORE_EXCLUSIVE_LIST(LSX) +#undef LSX + default: + form = "(LoadStoreExclusive)"; + } + + switch (instr->Mask(LoadStoreExclusiveMask)) { + case CASP_w: + case CASP_x: + case CASPA_w: + case CASPA_x: + case CASPL_w: + case CASPL_x: + case CASPAL_w: + case CASPAL_x: + if ((instr->GetRs() % 2 == 1) || (instr->GetRt() % 2 == 1)) { + mnemonic = "unallocated"; + form = "(LoadStoreExclusive)"; + } + break; + } + + Format(instr, mnemonic, form); +} + +void Disassembler::VisitLoadStorePAC(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(LoadStorePAC)"; + + switch (instr->Mask(LoadStorePACMask)) { + case LDRAA: + mnemonic = "ldraa"; + form = "'Xt, ['Xns'ILA]"; + break; + case LDRAB: + mnemonic = "ldrab"; + form = "'Xt, ['Xns'ILA]"; + break; + case LDRAA_pre: + mnemonic = "ldraa"; + form = "'Xt, ['Xns'ILA]!"; + break; + case LDRAB_pre: + mnemonic = "ldrab"; + form = "'Xt, ['Xns'ILA]!"; + break; + } + + Format(instr, mnemonic, form); +} + +#define ATOMIC_MEMORY_SIMPLE_LIST(V) \ + V(LDADD, "add") \ + V(LDCLR, "clr") \ + V(LDEOR, "eor") \ + V(LDSET, "set") \ + V(LDSMAX, "smax") \ + V(LDSMIN, "smin") \ + V(LDUMAX, "umax") \ + V(LDUMIN, "umin") + +void Disassembler::VisitAtomicMemory(const Instruction *instr) { + const int kMaxAtomicOpMnemonicLength = 16; + const char *mnemonic; + const char *form = "'Ws, 
'Wt, ['Xns]"; + + switch (instr->Mask(AtomicMemoryMask)) { +#define AMS(A, MN) \ + case A##B: \ + mnemonic = MN "b"; \ + break; \ + case A##AB: \ + mnemonic = MN "ab"; \ + break; \ + case A##LB: \ + mnemonic = MN "lb"; \ + break; \ + case A##ALB: \ + mnemonic = MN "alb"; \ + break; \ + case A##H: \ + mnemonic = MN "h"; \ + break; \ + case A##AH: \ + mnemonic = MN "ah"; \ + break; \ + case A##LH: \ + mnemonic = MN "lh"; \ + break; \ + case A##ALH: \ + mnemonic = MN "alh"; \ + break; \ + case A##_w: \ + mnemonic = MN; \ + break; \ + case A##A_w: \ + mnemonic = MN "a"; \ + break; \ + case A##L_w: \ + mnemonic = MN "l"; \ + break; \ + case A##AL_w: \ + mnemonic = MN "al"; \ + break; \ + case A##_x: \ + mnemonic = MN; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; \ + case A##A_x: \ + mnemonic = MN "a"; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; \ + case A##L_x: \ + mnemonic = MN "l"; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; \ + case A##AL_x: \ + mnemonic = MN "al"; \ + form = "'Xs, 'Xt, ['Xns]"; \ + break; + ATOMIC_MEMORY_SIMPLE_LIST(AMS) + + // SWP has the same semantics as ldadd etc but without the store aliases. 
+ AMS(SWP, "swp") +#undef AMS + + case LDAPRB: + mnemonic = "ldaprb"; + form = "'Wt, ['Xns]"; + break; + case LDAPRH: + mnemonic = "ldaprh"; + form = "'Wt, ['Xns]"; + break; + case LDAPR_w: + mnemonic = "ldapr"; + form = "'Wt, ['Xns]"; + break; + case LDAPR_x: + mnemonic = "ldapr"; + form = "'Xt, ['Xns]"; + break; + default: + mnemonic = "unimplemented"; + form = "(AtomicMemory)"; + } + + const char *prefix = ""; + switch (instr->Mask(AtomicMemoryMask)) { +#define AMS(A, MN) \ + case A##AB: \ + case A##ALB: \ + case A##AH: \ + case A##ALH: \ + case A##A_w: \ + case A##AL_w: \ + case A##A_x: \ + case A##AL_x: \ + prefix = "ld"; \ + break; \ + case A##B: \ + case A##LB: \ + case A##H: \ + case A##LH: \ + case A##_w: \ + case A##L_w: { \ + prefix = "ld"; \ + unsigned rt = instr->GetRt(); \ + if (Register(rt, 32).IsZero()) { \ + prefix = "st"; \ + form = "'Ws, ['Xns]"; \ + } \ + break; \ + } \ + case A##_x: \ + case A##L_x: { \ + prefix = "ld"; \ + unsigned rt = instr->GetRt(); \ + if (Register(rt, 64).IsZero()) { \ + prefix = "st"; \ + form = "'Xs, ['Xns]"; \ + } \ + break; \ + } + ATOMIC_MEMORY_SIMPLE_LIST(AMS) +#undef AMS + } + + char buffer[kMaxAtomicOpMnemonicLength]; + if (strlen(prefix) > 0) { + snprintf(buffer, kMaxAtomicOpMnemonicLength, "%s%s", prefix, mnemonic); + mnemonic = buffer; + } + + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPCompare(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fn, 'Fm"; + const char *form_zero = "'Fn, #0.0"; + + switch (instr->Mask(FPCompareMask)) { + case FCMP_h_zero: + case FCMP_s_zero: + case FCMP_d_zero: + form = form_zero; + VIXL_FALLTHROUGH(); + case FCMP_h: + case FCMP_s: + case FCMP_d: + mnemonic = "fcmp"; + break; + case FCMPE_h_zero: + case FCMPE_s_zero: + case FCMPE_d_zero: + form = form_zero; + VIXL_FALLTHROUGH(); + case FCMPE_h: + case FCMPE_s: + case FCMPE_d: + mnemonic = "fcmpe"; + break; + default: + form = "(FPCompare)"; + } + Format(instr, 
mnemonic, form); +} + + +void Disassembler::VisitFPConditionalCompare(const Instruction *instr) { + const char *mnemonic = "unmplemented"; + const char *form = "'Fn, 'Fm, 'INzcv, 'Cond"; + + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMP_h: + case FCCMP_s: + case FCCMP_d: + mnemonic = "fccmp"; + break; + case FCCMPE_h: + case FCCMPE_s: + case FCCMPE_d: + mnemonic = "fccmpe"; + break; + default: + form = "(FPConditionalCompare)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPConditionalSelect(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Cond"; + + switch (instr->Mask(FPConditionalSelectMask)) { + case FCSEL_h: + case FCSEL_s: + case FCSEL_d: + mnemonic = "fcsel"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing1Source(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Fd, 'Fn"; + + switch (instr->Mask(FPDataProcessing1SourceMask)) { +#define FORMAT(A, B) \ + case A##_h: \ + case A##_s: \ + case A##_d: \ + mnemonic = B; \ + break; + FORMAT(FMOV, "fmov"); + FORMAT(FABS, "fabs"); + FORMAT(FNEG, "fneg"); + FORMAT(FSQRT, "fsqrt"); + FORMAT(FRINTN, "frintn"); + FORMAT(FRINTP, "frintp"); + FORMAT(FRINTM, "frintm"); + FORMAT(FRINTZ, "frintz"); + FORMAT(FRINTA, "frinta"); + FORMAT(FRINTX, "frintx"); + FORMAT(FRINTI, "frinti"); +#undef FORMAT + case FCVT_ds: + mnemonic = "fcvt"; + form = "'Dd, 'Sn"; + break; + case FCVT_sd: + mnemonic = "fcvt"; + form = "'Sd, 'Dn"; + break; + case FCVT_hs: + mnemonic = "fcvt"; + form = "'Hd, 'Sn"; + break; + case FCVT_sh: + mnemonic = "fcvt"; + form = "'Sd, 'Hn"; + break; + case FCVT_dh: + mnemonic = "fcvt"; + form = "'Dd, 'Hn"; + break; + case FCVT_hd: + mnemonic = "fcvt"; + form = "'Hd, 'Dn"; + break; + default: + form = "(FPDataProcessing1Source)"; + } + Format(instr, mnemonic, form); +} + + +void 
Disassembler::VisitFPDataProcessing2Source(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm"; + + switch (instr->Mask(FPDataProcessing2SourceMask)) { +#define FORMAT(A, B) \ + case A##_h: \ + case A##_s: \ + case A##_d: \ + mnemonic = B; \ + break; + FORMAT(FADD, "fadd"); + FORMAT(FSUB, "fsub"); + FORMAT(FMUL, "fmul"); + FORMAT(FDIV, "fdiv"); + FORMAT(FMAX, "fmax"); + FORMAT(FMIN, "fmin"); + FORMAT(FMAXNM, "fmaxnm"); + FORMAT(FMINNM, "fminnm"); + FORMAT(FNMUL, "fnmul"); +#undef FORMAT + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPDataProcessing3Source(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Fd, 'Fn, 'Fm, 'Fa"; + + switch (instr->Mask(FPDataProcessing3SourceMask)) { +#define FORMAT(A, B) \ + case A##_h: \ + case A##_s: \ + case A##_d: \ + mnemonic = B; \ + break; + FORMAT(FMADD, "fmadd"); + FORMAT(FMSUB, "fmsub"); + FORMAT(FNMADD, "fnmadd"); + FORMAT(FNMSUB, "fnmsub"); +#undef FORMAT + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPImmediate(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "(FPImmediate)"; + switch (instr->Mask(FPImmediateMask)) { + case FMOV_h_imm: + mnemonic = "fmov"; + form = "'Hd, 'IFPHalf"; + break; + case FMOV_s_imm: + mnemonic = "fmov"; + form = "'Sd, 'IFPSingle"; + break; + case FMOV_d_imm: + mnemonic = "fmov"; + form = "'Dd, 'IFPDouble"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPIntegerConvert(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(FPIntegerConvert)"; + const char *form_rf = "'Rd, 'Fn"; + const char *form_fr = "'Fd, 'Rn"; + + switch (instr->Mask(FPIntegerConvertMask)) { + case FMOV_wh: + case FMOV_xh: + case FMOV_ws: + case FMOV_xd: + mnemonic = "fmov"; + form = form_rf; + break; + case FMOV_hw: 
+ case FMOV_hx: + case FMOV_sw: + case FMOV_dx: + mnemonic = "fmov"; + form = form_fr; + break; + case FMOV_d1_x: + mnemonic = "fmov"; + form = "'Vd.D[1], 'Rn"; + break; + case FMOV_x_d1: + mnemonic = "fmov"; + form = "'Rd, 'Vn.D[1]"; + break; + case FCVTAS_wh: + case FCVTAS_xh: + case FCVTAS_ws: + case FCVTAS_xs: + case FCVTAS_wd: + case FCVTAS_xd: + mnemonic = "fcvtas"; + form = form_rf; + break; + case FCVTAU_wh: + case FCVTAU_xh: + case FCVTAU_ws: + case FCVTAU_xs: + case FCVTAU_wd: + case FCVTAU_xd: + mnemonic = "fcvtau"; + form = form_rf; + break; + case FCVTMS_wh: + case FCVTMS_xh: + case FCVTMS_ws: + case FCVTMS_xs: + case FCVTMS_wd: + case FCVTMS_xd: + mnemonic = "fcvtms"; + form = form_rf; + break; + case FCVTMU_wh: + case FCVTMU_xh: + case FCVTMU_ws: + case FCVTMU_xs: + case FCVTMU_wd: + case FCVTMU_xd: + mnemonic = "fcvtmu"; + form = form_rf; + break; + case FCVTNS_wh: + case FCVTNS_xh: + case FCVTNS_ws: + case FCVTNS_xs: + case FCVTNS_wd: + case FCVTNS_xd: + mnemonic = "fcvtns"; + form = form_rf; + break; + case FCVTNU_wh: + case FCVTNU_xh: + case FCVTNU_ws: + case FCVTNU_xs: + case FCVTNU_wd: + case FCVTNU_xd: + mnemonic = "fcvtnu"; + form = form_rf; + break; + case FCVTZU_wh: + case FCVTZU_xh: + case FCVTZU_ws: + case FCVTZU_xs: + case FCVTZU_wd: + case FCVTZU_xd: + mnemonic = "fcvtzu"; + form = form_rf; + break; + case FCVTZS_wh: + case FCVTZS_xh: + case FCVTZS_ws: + case FCVTZS_xs: + case FCVTZS_wd: + case FCVTZS_xd: + mnemonic = "fcvtzs"; + form = form_rf; + break; + case FCVTPU_wh: + case FCVTPU_xh: + case FCVTPU_xs: + case FCVTPU_wd: + case FCVTPU_ws: + case FCVTPU_xd: + mnemonic = "fcvtpu"; + form = form_rf; + break; + case FCVTPS_wh: + case FCVTPS_xh: + case FCVTPS_ws: + case FCVTPS_xs: + case FCVTPS_wd: + case FCVTPS_xd: + mnemonic = "fcvtps"; + form = form_rf; + break; + case SCVTF_hw: + case SCVTF_hx: + case SCVTF_sw: + case SCVTF_sx: + case SCVTF_dw: + case SCVTF_dx: + mnemonic = "scvtf"; + form = form_fr; + break; + case UCVTF_hw: + case 
UCVTF_hx: + case UCVTF_sw: + case UCVTF_sx: + case UCVTF_dw: + case UCVTF_dx: + mnemonic = "ucvtf"; + form = form_fr; + break; + case FJCVTZS: + mnemonic = "fjcvtzs"; + form = form_rf; + break; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitFPFixedPointConvert(const Instruction *instr) { + const char *mnemonic = ""; + const char *form = "'Rd, 'Fn, 'IFPFBits"; + const char *form_fr = "'Fd, 'Rn, 'IFPFBits"; + + switch (instr->Mask(FPFixedPointConvertMask)) { + case FCVTZS_wh_fixed: + case FCVTZS_xh_fixed: + case FCVTZS_ws_fixed: + case FCVTZS_xs_fixed: + case FCVTZS_wd_fixed: + case FCVTZS_xd_fixed: + mnemonic = "fcvtzs"; + break; + case FCVTZU_wh_fixed: + case FCVTZU_xh_fixed: + case FCVTZU_ws_fixed: + case FCVTZU_xs_fixed: + case FCVTZU_wd_fixed: + case FCVTZU_xd_fixed: + mnemonic = "fcvtzu"; + break; + case SCVTF_hw_fixed: + case SCVTF_hx_fixed: + case SCVTF_sw_fixed: + case SCVTF_sx_fixed: + case SCVTF_dw_fixed: + case SCVTF_dx_fixed: + mnemonic = "scvtf"; + form = form_fr; + break; + case UCVTF_hw_fixed: + case UCVTF_hx_fixed: + case UCVTF_sw_fixed: + case UCVTF_sx_fixed: + case UCVTF_dw_fixed: + case UCVTF_dx_fixed: + mnemonic = "ucvtf"; + form = form_fr; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + +// clang-format off +#define PAUTH_SYSTEM_MNEMONICS(V) \ + V(PACIA1716, "pacia1716") \ + V(PACIB1716, "pacib1716") \ + V(AUTIA1716, "autia1716") \ + V(AUTIB1716, "autib1716") \ + V(PACIAZ, "paciaz") \ + V(PACIASP, "paciasp") \ + V(PACIBZ, "pacibz") \ + V(PACIBSP, "pacibsp") \ + V(AUTIAZ, "autiaz") \ + V(AUTIASP, "autiasp") \ + V(AUTIBZ, "autibz") \ + V(AUTIBSP, "autibsp") +// clang-format on + +void Disassembler::VisitSystem(const Instruction *instr) { + // Some system instructions hijack their Op and Cp fields to represent a + // range of immediates instead of indicating a different instruction. This + // makes the decoding tricky. 
+ const char *mnemonic = "unimplemented"; + const char *form = "(System)"; + if (instr->GetInstructionBits() == XPACLRI) { + mnemonic = "xpaclri"; + form = NULL; + } else if (instr->Mask(SystemPStateFMask) == SystemPStateFixed) { + switch (instr->Mask(SystemPStateMask)) { + case CFINV: + mnemonic = "cfinv"; + form = NULL; + break; + case AXFLAG: + mnemonic = "axflag"; + form = NULL; + break; + case XAFLAG: + mnemonic = "xaflag"; + form = NULL; + break; + } + } else if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) { + switch (instr->Mask(SystemPAuthMask)) { +#define PAUTH_CASE(NAME, MN) \ + case NAME: \ + mnemonic = MN; \ + form = NULL; \ + break; + + PAUTH_SYSTEM_MNEMONICS(PAUTH_CASE) +#undef PAUTH_CASE + } + } else if (instr->Mask(SystemExclusiveMonitorFMask) == + SystemExclusiveMonitorFixed) { + switch (instr->Mask(SystemExclusiveMonitorMask)) { + case CLREX: { + mnemonic = "clrex"; + form = (instr->GetCRm() == 0xf) ? NULL : "'IX"; + break; + } + } + } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { + switch (instr->Mask(SystemSysRegMask)) { + case MRS: { + mnemonic = "mrs"; + form = "'Xt, 'IY"; + break; + } + case MSR: { + mnemonic = "msr"; + form = "'IY, 'Xt"; + break; + } + } + } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) { + form = NULL; + switch (instr->GetImmHint()) { + case NOP: + mnemonic = "nop"; + break; + case YIELD: + mnemonic = "yield"; + break; + case WFE: + mnemonic = "wfe"; + break; + case WFI: + mnemonic = "wfi"; + break; + case SEV: + mnemonic = "sev"; + break; + case SEVL: + mnemonic = "sevl"; + break; + case ESB: + mnemonic = "esb"; + break; + case CSDB: + mnemonic = "csdb"; + break; + case BTI: + mnemonic = "bti"; + break; + case BTI_c: + mnemonic = "bti c"; + break; + case BTI_j: + mnemonic = "bti j"; + break; + case BTI_jc: + mnemonic = "bti jc"; + break; + default: + // Fall back to 'hint #'. 
+ form = "'IH"; + mnemonic = "hint"; + break; + } + } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { + switch (instr->Mask(MemBarrierMask)) { + case DMB: { + mnemonic = "dmb"; + form = "'M"; + break; + } + case DSB: { + mnemonic = "dsb"; + form = "'M"; + break; + } + case ISB: { + mnemonic = "isb"; + form = NULL; + break; + } + } + } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) { + switch (instr->GetSysOp()) { + case IVAU: + mnemonic = "ic"; + form = "ivau, 'Xt"; + break; + case CVAC: + mnemonic = "dc"; + form = "cvac, 'Xt"; + break; + case CVAU: + mnemonic = "dc"; + form = "cvau, 'Xt"; + break; + case CVAP: + mnemonic = "dc"; + form = "cvap, 'Xt"; + break; + case CIVAC: + mnemonic = "dc"; + form = "civac, 'Xt"; + break; + case ZVA: + mnemonic = "dc"; + form = "zva, 'Xt"; + break; + default: + mnemonic = "sys"; + if (instr->GetRt() == 31) { + form = "'G1, 'Kn, 'Km, 'G2"; + } else { + form = "'G1, 'Kn, 'Km, 'G2, 'Xt"; + } + break; + } + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitException(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'IDebug"; + + switch (instr->Mask(ExceptionMask)) { + case HLT: + mnemonic = "hlt"; + break; + case BRK: + mnemonic = "brk"; + break; + case SVC: + mnemonic = "svc"; + break; + case HVC: + mnemonic = "hvc"; + break; + case SMC: + mnemonic = "smc"; + break; + case DCPS1: + mnemonic = "dcps1"; + form = "{'IDebug}"; + break; + case DCPS2: + mnemonic = "dcps2"; + form = "{'IDebug}"; + break; + case DCPS3: + mnemonic = "dcps3"; + form = "{'IDebug}"; + break; + default: + form = "(Exception)"; + } + Format(instr, mnemonic, form); +} + + +void Disassembler::VisitCrypto2RegSHA(const Instruction *instr) { + VisitUnimplemented(instr); +} + + +void Disassembler::VisitCrypto3RegSHA(const Instruction *instr) { + VisitUnimplemented(instr); +} + + +void Disassembler::VisitCryptoAES(const Instruction *instr) { + VisitUnimplemented(instr); +} + + +void 
Disassembler::VisitNEON2RegMisc(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s"; + const char *form_cmp_zero = "'Vd.%s, 'Vn.%s, #0"; + const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0"; + NEONFormatDecoder nfd(instr); + + static const NEONFormatMap map_lp_ta = + {{23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}}; + + static const NEONFormatMap map_cvt_ta = {{22}, {NF_4S, NF_2D}}; + + static const NEONFormatMap map_cvt_tb = {{22, 30}, + {NF_4H, NF_8H, NF_2S, NF_4S}}; + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_REV64: + mnemonic = "rev64"; + break; + case NEON_REV32: + mnemonic = "rev32"; + break; + case NEON_REV16: + mnemonic = "rev16"; + break; + case NEON_SADDLP: + mnemonic = "saddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADDLP: + mnemonic = "uaddlp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SUQADD: + mnemonic = "suqadd"; + break; + case NEON_USQADD: + mnemonic = "usqadd"; + break; + case NEON_CLS: + mnemonic = "cls"; + break; + case NEON_CLZ: + mnemonic = "clz"; + break; + case NEON_CNT: + mnemonic = "cnt"; + break; + case NEON_SADALP: + mnemonic = "sadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_UADALP: + mnemonic = "uadalp"; + nfd.SetFormatMap(0, &map_lp_ta); + break; + case NEON_SQABS: + mnemonic = "sqabs"; + break; + case NEON_SQNEG: + mnemonic = "sqneg"; + break; + case NEON_CMGT_zero: + mnemonic = "cmgt"; + form = form_cmp_zero; + break; + case NEON_CMGE_zero: + mnemonic = "cmge"; + form = form_cmp_zero; + break; + case NEON_CMEQ_zero: + mnemonic = "cmeq"; + form = form_cmp_zero; + break; + case NEON_CMLE_zero: + mnemonic = "cmle"; + form = form_cmp_zero; + break; + case NEON_CMLT_zero: + mnemonic = "cmlt"; + form = form_cmp_zero; + break; + 
case NEON_ABS: + mnemonic = "abs"; + break; + case NEON_NEG: + mnemonic = "neg"; + break; + case NEON_RBIT_NOT: + switch (instr->GetFPType()) { + case 0: + mnemonic = "mvn"; + break; + case 1: + mnemonic = "rbit"; + break; + default: + form = "(NEON2RegMisc)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + break; + } + } else { + // These instructions all use a one bit size field, except XTN, SQXTUN, + // SHLL, SQXTN and UQXTN, which use a two bit size field. + nfd.SetFormatMaps(nfd.FPFormatMap()); + switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: + mnemonic = "fabs"; + break; + case NEON_FNEG: + mnemonic = "fneg"; + break; + case NEON_FCVTN: + mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTXN: + mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn"; + nfd.SetFormatMap(0, &map_cvt_tb); + nfd.SetFormatMap(1, &map_cvt_ta); + break; + case NEON_FCVTL: + mnemonic = instr->Mask(NEON_Q) ? 
"fcvtl2" : "fcvtl"; + nfd.SetFormatMap(0, &map_cvt_ta); + nfd.SetFormatMap(1, &map_cvt_tb); + break; + case NEON_FRINTN: + mnemonic = "frintn"; + break; + case NEON_FRINTA: + mnemonic = "frinta"; + break; + case NEON_FRINTP: + mnemonic = "frintp"; + break; + case NEON_FRINTM: + mnemonic = "frintm"; + break; + case NEON_FRINTX: + mnemonic = "frintx"; + break; + case NEON_FRINTZ: + mnemonic = "frintz"; + break; + case NEON_FRINTI: + mnemonic = "frinti"; + break; + case NEON_FCVTNS: + mnemonic = "fcvtns"; + break; + case NEON_FCVTNU: + mnemonic = "fcvtnu"; + break; + case NEON_FCVTPS: + mnemonic = "fcvtps"; + break; + case NEON_FCVTPU: + mnemonic = "fcvtpu"; + break; + case NEON_FCVTMS: + mnemonic = "fcvtms"; + break; + case NEON_FCVTMU: + mnemonic = "fcvtmu"; + break; + case NEON_FCVTZS: + mnemonic = "fcvtzs"; + break; + case NEON_FCVTZU: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTAS: + mnemonic = "fcvtas"; + break; + case NEON_FCVTAU: + mnemonic = "fcvtau"; + break; + case NEON_FSQRT: + mnemonic = "fsqrt"; + break; + case NEON_SCVTF: + mnemonic = "scvtf"; + break; + case NEON_UCVTF: + mnemonic = "ucvtf"; + break; + case NEON_URSQRTE: + mnemonic = "ursqrte"; + break; + case NEON_URECPE: + mnemonic = "urecpe"; + break; + case NEON_FRSQRTE: + mnemonic = "frsqrte"; + break; + case NEON_FRECPE: + mnemonic = "frecpe"; + break; + case NEON_FCMGT_zero: + mnemonic = "fcmgt"; + form = form_fcmp_zero; + break; + case NEON_FCMGE_zero: + mnemonic = "fcmge"; + form = form_fcmp_zero; + break; + case NEON_FCMEQ_zero: + mnemonic = "fcmeq"; + form = form_fcmp_zero; + break; + case NEON_FCMLE_zero: + mnemonic = "fcmle"; + form = form_fcmp_zero; + break; + case NEON_FCMLT_zero: + mnemonic = "fcmlt"; + form = form_fcmp_zero; + break; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + + switch 
(instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: + mnemonic = "xtn"; + break; + case NEON_SQXTN: + mnemonic = "sqxtn"; + break; + case NEON_UQXTN: + mnemonic = "uqxtn"; + break; + case NEON_SQXTUN: + mnemonic = "sqxtun"; + break; + case NEON_SHLL: + mnemonic = "shll"; + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(1, nfd.IntegerFormatMap()); + switch (instr->GetNEONSize()) { + case 0: + form = "'Vd.%s, 'Vn.%s, #8"; + break; + case 1: + form = "'Vd.%s, 'Vn.%s, #16"; + break; + case 2: + form = "'Vd.%s, 'Vn.%s, #32"; + break; + default: + Format(instr, "unallocated", "(NEON2RegMisc)"); + return; + } + } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + return; + } else { + form = "(NEON2RegMisc)"; + } + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void Disassembler::VisitNEON2RegMiscFP16(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s"; + const char *form_cmp = "'Vd.%s, 'Vn.%s, #0.0"; + + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + NEONFormatDecoder nfd(instr, &map_half); + + switch (instr->Mask(NEON2RegMiscFP16Mask)) { +// clang-format off +#define FORMAT(A, B) \ + case NEON_##A##_H: \ + mnemonic = B; \ + break; + FORMAT(FABS, "fabs") + FORMAT(FCVTAS, "fcvtas") + FORMAT(FCVTAU, "fcvtau") + FORMAT(FCVTMS, "fcvtms") + FORMAT(FCVTMU, "fcvtmu") + FORMAT(FCVTNS, "fcvtns") + FORMAT(FCVTNU, "fcvtnu") + FORMAT(FCVTPS, "fcvtps") + FORMAT(FCVTPU, "fcvtpu") + FORMAT(FCVTZS, "fcvtzs") + FORMAT(FCVTZU, "fcvtzu") + FORMAT(FNEG, "fneg") + FORMAT(FRECPE, "frecpe") + FORMAT(FRINTA, "frinta") + FORMAT(FRINTI, "frinti") + FORMAT(FRINTM, "frintm") + FORMAT(FRINTN, "frintn") + FORMAT(FRINTP, "frintp") + FORMAT(FRINTX, "frintx") + FORMAT(FRINTZ, "frintz") + FORMAT(FRSQRTE, "frsqrte") + FORMAT(FSQRT, "fsqrt") + FORMAT(SCVTF, "scvtf") + FORMAT(UCVTF, "ucvtf") +// clang-format on +#undef FORMAT + + case NEON_FCMEQ_H_zero: + mnemonic = "fcmeq"; + form = 
form_cmp; + break; + case NEON_FCMGT_H_zero: + mnemonic = "fcmgt"; + form = form_cmp; + break; + case NEON_FCMGE_H_zero: + mnemonic = "fcmge"; + form = form_cmp; + break; + case NEON_FCMLT_H_zero: + mnemonic = "fcmlt"; + form = form_cmp; + break; + case NEON_FCMLE_H_zero: + mnemonic = "fcmle"; + form = form_cmp; + break; + default: + form = "(NEON2RegMiscFP16)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEON3Same(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); + + if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { + switch (instr->Mask(NEON3SameLogicalMask)) { + case NEON_AND: + mnemonic = "and"; + break; + case NEON_ORR: + mnemonic = "orr"; + if (instr->GetRm() == instr->GetRn()) { + mnemonic = "mov"; + form = "'Vd.%s, 'Vn.%s"; + } + break; + case NEON_ORN: + mnemonic = "orn"; + break; + case NEON_EOR: + mnemonic = "eor"; + break; + case NEON_BIC: + mnemonic = "bic"; + break; + case NEON_BIF: + mnemonic = "bif"; + break; + case NEON_BIT: + mnemonic = "bit"; + break; + case NEON_BSL: + mnemonic = "bsl"; + break; + default: + form = "(NEON3Same)"; + } + nfd.SetFormatMaps(nfd.LogicalFormatMap()); + } else { + static const char kUnknown[] = "unallocated"; + static const char *mnemonics[] = {"shadd", + "uhadd", + "shadd", + "uhadd", + "sqadd", + "uqadd", + "sqadd", + "uqadd", + "srhadd", + "urhadd", + "srhadd", + "urhadd", + // Handled by logical cases above. 
+ NULL, + NULL, + NULL, + NULL, + "shsub", + "uhsub", + "shsub", + "uhsub", + "sqsub", + "uqsub", + "sqsub", + "uqsub", + "cmgt", + "cmhi", + "cmgt", + "cmhi", + "cmge", + "cmhs", + "cmge", + "cmhs", + "sshl", + "ushl", + "sshl", + "ushl", + "sqshl", + "uqshl", + "sqshl", + "uqshl", + "srshl", + "urshl", + "srshl", + "urshl", + "sqrshl", + "uqrshl", + "sqrshl", + "uqrshl", + "smax", + "umax", + "smax", + "umax", + "smin", + "umin", + "smin", + "umin", + "sabd", + "uabd", + "sabd", + "uabd", + "saba", + "uaba", + "saba", + "uaba", + "add", + "sub", + "add", + "sub", + "cmtst", + "cmeq", + "cmtst", + "cmeq", + "mla", + "mls", + "mla", + "mls", + "mul", + "pmul", + "mul", + "pmul", + "smaxp", + "umaxp", + "smaxp", + "umaxp", + "sminp", + "uminp", + "sminp", + "uminp", + "sqdmulh", + "sqrdmulh", + "sqdmulh", + "sqrdmulh", + "addp", + kUnknown, + "addp", + kUnknown, + "fmaxnm", + "fmaxnmp", + "fminnm", + "fminnmp", + "fmla", + kUnknown, // FMLAL2 or unallocated + "fmls", + kUnknown, // FMLSL2 or unallocated + "fadd", + "faddp", + "fsub", + "fabd", + "fmulx", + "fmul", + kUnknown, + kUnknown, + "fcmeq", + "fcmge", + kUnknown, + "fcmgt", + kUnknown, // FMLAL or unallocated + "facge", + kUnknown, // FMLSL or unallocated + "facgt", + "fmax", + "fmaxp", + "fmin", + "fminp", + "frecps", + "fdiv", + "frsqrts", + kUnknown}; + + // Operation is determined by the opcode bits (15-11), the top bit of + // size (23) and the U bit (29). + unsigned index = (instr->ExtractBits(15, 11) << 2) | + (instr->ExtractBit(23) << 1) | instr->ExtractBit(29); + VIXL_ASSERT(index < ArrayLength(mnemonics)); + mnemonic = mnemonics[index]; + // Assert that index is not one of the previously handled logical + // instructions. + VIXL_ASSERT(mnemonic != NULL); + + if (mnemonic == kUnknown) { + // Catch special cases where we need to check more bits than we have in + // the table index. Anything not matched here is unallocated. + + const char *fhm_form = (instr->Mask(NEON_Q) == 0) + ? 
"'Vd.2s, 'Vn.2h, 'Vm.2h" + : "'Vd.4s, 'Vn.4h, 'Vm.4h"; + switch (instr->Mask(NEON3SameFHMMask)) { + case NEON_FMLAL: + mnemonic = "fmlal"; + form = fhm_form; + break; + case NEON_FMLAL2: + mnemonic = "fmlal2"; + form = fhm_form; + break; + case NEON_FMLSL: + mnemonic = "fmlsl"; + form = fhm_form; + break; + case NEON_FMLSL2: + mnemonic = "fmlsl2"; + form = fhm_form; + break; + default: + VIXL_ASSERT(strcmp(mnemonic, "unallocated") == 0); + form = "(NEON3Same)"; + break; + } + } + + if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPFormatMap()); + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void Disassembler::VisitNEON3SameFP16(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + + NEONFormatDecoder nfd(instr); + nfd.SetFormatMaps(nfd.FP16FormatMap()); + + switch (instr->Mask(NEON3SameFP16Mask)) { +#define FORMAT(A, B) \ + case NEON_##A##_H: \ + mnemonic = B; \ + break; + FORMAT(FMAXNM, "fmaxnm"); + FORMAT(FMLA, "fmla"); + FORMAT(FADD, "fadd"); + FORMAT(FMULX, "fmulx"); + FORMAT(FCMEQ, "fcmeq"); + FORMAT(FMAX, "fmax"); + FORMAT(FRECPS, "frecps"); + FORMAT(FMINNM, "fminnm"); + FORMAT(FMLS, "fmls"); + FORMAT(FSUB, "fsub"); + FORMAT(FMIN, "fmin"); + FORMAT(FRSQRTS, "frsqrts"); + FORMAT(FMAXNMP, "fmaxnmp"); + FORMAT(FADDP, "faddp"); + FORMAT(FMUL, "fmul"); + FORMAT(FCMGE, "fcmge"); + FORMAT(FACGE, "facge"); + FORMAT(FMAXP, "fmaxp"); + FORMAT(FDIV, "fdiv"); + FORMAT(FMINNMP, "fminnmp"); + FORMAT(FABD, "fabd"); + FORMAT(FCMGT, "fcmgt"); + FORMAT(FACGT, "facgt"); + FORMAT(FMINP, "fminp"); +#undef FORMAT + default: + form = "(NEON3SameFP16)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + +void Disassembler::VisitNEON3SameExtra(const Instruction *instr) { + static const NEONFormatMap map_usdot = {{30}, {NF_8B, NF_16B}}; + + const char *mnemonic = "unallocated"; + const char *form = "(NEON3SameExtra)"; + + NEONFormatDecoder nfd(instr); + + if 
(instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) { + mnemonic = "fcmla"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVFCNM"; + } else if (instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD) { + mnemonic = "fcadd"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVFCNA"; + } else { + form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + switch (instr->Mask(NEON3SameExtraMask)) { + case NEON_SDOT: + mnemonic = "sdot"; + nfd.SetFormatMap(1, &map_usdot); + nfd.SetFormatMap(2, &map_usdot); + break; + case NEON_SQRDMLAH: + mnemonic = "sqrdmlah"; + break; + case NEON_UDOT: + mnemonic = "udot"; + nfd.SetFormatMap(1, &map_usdot); + nfd.SetFormatMap(2, &map_usdot); + break; + case NEON_SQRDMLSH: + mnemonic = "sqrdmlsh"; + break; + } + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEON3Different(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + + NEONFormatDecoder nfd(instr); + nfd.SetFormatMap(0, nfd.LongIntegerFormatMap()); + + // Ignore the Q bit. Appending a "2" suffix is handled later. 
+ switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) { + case NEON_PMULL: + mnemonic = "pmull"; + break; + case NEON_SABAL: + mnemonic = "sabal"; + break; + case NEON_SABDL: + mnemonic = "sabdl"; + break; + case NEON_SADDL: + mnemonic = "saddl"; + break; + case NEON_SMLAL: + mnemonic = "smlal"; + break; + case NEON_SMLSL: + mnemonic = "smlsl"; + break; + case NEON_SMULL: + mnemonic = "smull"; + break; + case NEON_SSUBL: + mnemonic = "ssubl"; + break; + case NEON_SQDMLAL: + mnemonic = "sqdmlal"; + break; + case NEON_SQDMLSL: + mnemonic = "sqdmlsl"; + break; + case NEON_SQDMULL: + mnemonic = "sqdmull"; + break; + case NEON_UABAL: + mnemonic = "uabal"; + break; + case NEON_UABDL: + mnemonic = "uabdl"; + break; + case NEON_UADDL: + mnemonic = "uaddl"; + break; + case NEON_UMLAL: + mnemonic = "umlal"; + break; + case NEON_UMLSL: + mnemonic = "umlsl"; + break; + case NEON_UMULL: + mnemonic = "umull"; + break; + case NEON_USUBL: + mnemonic = "usubl"; + break; + case NEON_SADDW: + mnemonic = "saddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_SSUBW: + mnemonic = "ssubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_UADDW: + mnemonic = "uaddw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_USUBW: + mnemonic = "usubw"; + nfd.SetFormatMap(1, nfd.LongIntegerFormatMap()); + break; + case NEON_ADDHN: + mnemonic = "addhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RADDHN: + mnemonic = "raddhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_RSUBHN: + mnemonic = "rsubhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + case NEON_SUBHN: + mnemonic = "subhn"; + nfd.SetFormatMaps(nfd.LongIntegerFormatMap()); + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + break; + default: + form = "(NEON3Different)"; 
+ } + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONAcrossLanes(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, 'Vn.%s"; + const char *form_half = "'Hd, 'Vn.%s"; + bool half_op = false; + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::ScalarFormatMap(), + NEONFormatDecoder::IntegerFormatMap()); + + if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) { + half_op = true; + form = form_half; + nfd.SetFormatMaps(&map_half); + switch (instr->Mask(NEONAcrossLanesFP16Mask)) { + case NEON_FMAXV_H: + mnemonic = "fmaxv"; + break; + case NEON_FMINV_H: + mnemonic = "fminv"; + break; + case NEON_FMAXNMV_H: + mnemonic = "fmaxnmv"; + break; + case NEON_FMINNMV_H: + mnemonic = "fminnmv"; + break; + } + } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + nfd.SetFormatMap(1, nfd.FPFormatMap()); + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: + mnemonic = "fmaxv"; + break; + case NEON_FMINV: + mnemonic = "fminv"; + break; + case NEON_FMAXNMV: + mnemonic = "fmaxnmv"; + break; + case NEON_FMINNMV: + mnemonic = "fminnmv"; + break; + default: + form = "(NEONAcrossLanes)"; + break; + } + } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) { + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: + mnemonic = "addv"; + break; + case NEON_SMAXV: + mnemonic = "smaxv"; + break; + case NEON_SMINV: + mnemonic = "sminv"; + break; + case NEON_UMAXV: + mnemonic = "umaxv"; + break; + case NEON_UMINV: + mnemonic = "uminv"; + break; + case NEON_SADDLV: + mnemonic = "saddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + case NEON_UADDLV: + mnemonic = "uaddlv"; + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + break; + default: + form = "(NEONAcrossLanes)"; + break; + } + } + + if 
(half_op) { + Format(instr, mnemonic, nfd.Substitute(form)); + } else { + Format(instr, + mnemonic, + nfd.Substitute(form, + NEONFormatDecoder::kPlaceholder, + NEONFormatDecoder::kFormat)); + } +} + + +void Disassembler::VisitNEONByIndexedElement(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + bool l_instr = false; + bool fp_instr = false; + bool cn_instr = false; + bool half_instr = false; + bool fhm_instr = false; // FMLAL{2}, FMLSL{2} + + const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]"; + + static const NEONFormatMap map_ta = {{23, 22}, {NF_UNDEF, NF_4S, NF_2D}}; + static const NEONFormatMap map_cn = + {{23, 22, 30}, + {NF_UNDEF, NF_UNDEF, NF_4H, NF_8H, NF_UNDEF, NF_4S, NF_UNDEF, NF_UNDEF}}; + static const NEONFormatMap map_usdot = {{30}, {NF_8B, NF_16B}}; + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + + NEONFormatDecoder nfd(instr, + &map_ta, + NEONFormatDecoder::IntegerFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_SMULL_byelement: + mnemonic = "smull"; + l_instr = true; + break; + case NEON_UMULL_byelement: + mnemonic = "umull"; + l_instr = true; + break; + case NEON_SMLAL_byelement: + mnemonic = "smlal"; + l_instr = true; + break; + case NEON_UMLAL_byelement: + mnemonic = "umlal"; + l_instr = true; + break; + case NEON_SMLSL_byelement: + mnemonic = "smlsl"; + l_instr = true; + break; + case NEON_UMLSL_byelement: + mnemonic = "umlsl"; + l_instr = true; + break; + case NEON_SQDMULL_byelement: + mnemonic = "sqdmull"; + l_instr = true; + break; + case NEON_SQDMLAL_byelement: + mnemonic = "sqdmlal"; + l_instr = true; + break; + case NEON_SQDMLSL_byelement: + mnemonic = "sqdmlsl"; + l_instr = true; + break; + case NEON_MUL_byelement: + mnemonic = "mul"; + break; + case NEON_MLA_byelement: + mnemonic = "mla"; + break; + case NEON_MLS_byelement: + mnemonic = "mls"; + break; + case NEON_SQDMULH_byelement: + mnemonic = "sqdmulh"; + break; + 
case NEON_SQRDMULH_byelement: + mnemonic = "sqrdmulh"; + break; + case NEON_SDOT_byelement: + mnemonic = "sdot"; + form = "'Vd.%s, 'Vn.%s, 'Ve.4b['IVByElemIndex]"; + nfd.SetFormatMap(1, &map_usdot); + break; + case NEON_SQRDMLAH_byelement: + mnemonic = "sqrdmlah"; + break; + case NEON_UDOT_byelement: + mnemonic = "udot"; + form = "'Vd.%s, 'Vn.%s, 'Ve.4b['IVByElemIndex]"; + nfd.SetFormatMap(1, &map_usdot); + break; + case NEON_SQRDMLSH_byelement: + mnemonic = "sqrdmlsh"; + break; + default: { + switch (instr->Mask(NEONByIndexedElementFPLongMask)) { + case NEON_FMLAL_H_byelement: + mnemonic = "fmlal"; + fhm_instr = true; + break; + case NEON_FMLAL2_H_byelement: + mnemonic = "fmlal2"; + fhm_instr = true; + break; + case NEON_FMLSL_H_byelement: + mnemonic = "fmlsl"; + fhm_instr = true; + break; + case NEON_FMLSL2_H_byelement: + mnemonic = "fmlsl2"; + fhm_instr = true; + break; + default: + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_byelement: + mnemonic = "fmul"; + fp_instr = true; + break; + case NEON_FMLA_byelement: + mnemonic = "fmla"; + fp_instr = true; + break; + case NEON_FMLS_byelement: + mnemonic = "fmls"; + fp_instr = true; + break; + case NEON_FMULX_byelement: + mnemonic = "fmulx"; + fp_instr = true; + break; + case NEON_FMLA_H_byelement: + mnemonic = "fmla"; + half_instr = true; + break; + case NEON_FMLS_H_byelement: + mnemonic = "fmls"; + half_instr = true; + break; + case NEON_FMUL_H_byelement: + mnemonic = "fmul"; + half_instr = true; + break; + case NEON_FMULX_H_byelement: + mnemonic = "fmulx"; + half_instr = true; + break; + default: + switch (instr->Mask(NEONByIndexedElementFPComplexMask)) { + case NEON_FCMLA_byelement: + mnemonic = "fcmla"; + cn_instr = true; + form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndexRot], 'ILFCNR"; + break; + } + } + } + } + } + + if (fhm_instr) { + // These are oddballs. Set the format manually. + form = (instr->Mask(NEON_Q) == 0) + ? 
"'Vd.2s, 'Vn.2h, 'Ve.h['IVByElemIndexFHM]" + : "'Vd.4s, 'Vn.4h, 'Ve.h['IVByElemIndexFHM]"; + Format(instr, mnemonic, nfd.Substitute(form)); + } else if (half_instr) { + form = "'Vd.%s, 'Vn.%s, 'Ve.h['IVByElemIndex]"; + nfd.SetFormatMaps(&map_half, &map_half); + Format(instr, mnemonic, nfd.Substitute(form)); + } else if (l_instr) { + Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form)); + } else if (fp_instr) { + nfd.SetFormatMap(0, nfd.FPFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); + } else if (cn_instr) { + nfd.SetFormatMap(0, &map_cn); + nfd.SetFormatMap(1, &map_cn); + Format(instr, mnemonic, nfd.Substitute(form)); + } else { + nfd.SetFormatMap(0, nfd.IntegerFormatMap()); + Format(instr, mnemonic, nfd.Substitute(form)); + } +} + + +void Disassembler::VisitNEONCopy(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONCopy)"; + + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::TriangularFormatMap(), + NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]"; + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + mnemonic = "mov"; + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Vd.%s['IVInsIndex1], 'Xn"; + } else { + form = "'Vd.%s['IVInsIndex1], 'Wn"; + } + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + if (instr->Mask(NEON_Q) || ((instr->GetImmNEON5() & 7) == 4)) { + mnemonic = "mov"; + } else { + mnemonic = "umov"; + } + nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap()); + if (nfd.GetVectorFormat() == kFormatD) { + form = "'Xd, 'Vn.%s['IVInsIndex1]"; + } else { + form = "'Wd, 'Vn.%s['IVInsIndex1]"; + } + } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) { + mnemonic = "smov"; + nfd.SetFormatMap(0, 
nfd.TriangularScalarFormatMap()); + form = "'Rdq, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + mnemonic = "dup"; + form = "'Vd.%s, 'Vn.%s['IVInsIndex1]"; + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + mnemonic = "dup"; + if (nfd.GetVectorFormat() == kFormat2D) { + form = "'Vd.%s, 'Xn"; + } else { + form = "'Vd.%s, 'Wn"; + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONExtract(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONExtract)"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + mnemonic = "ext"; + form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + const char *form_1v = "{'Vt.%1$s}, ['Xns]"; + const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]"; + const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]"; + const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructMask)) { + case NEON_LD1_1v: + mnemonic = "ld1"; + form = form_1v; + break; + case NEON_LD1_2v: + mnemonic = "ld1"; + form = form_2v; + break; + case NEON_LD1_3v: + mnemonic = "ld1"; + form = form_3v; + break; + case NEON_LD1_4v: + mnemonic = "ld1"; + form = form_4v; + break; + case NEON_LD2: + mnemonic = "ld2"; + form = form_2v; + break; + case NEON_LD3: + mnemonic = "ld3"; + form = form_3v; + break; + case NEON_LD4: + mnemonic = "ld4"; + form = form_4v; + break; + case NEON_ST1_1v: + mnemonic = "st1"; + form = form_1v; + break; + case NEON_ST1_2v: + mnemonic = "st1"; + form = form_2v; + break; + case NEON_ST1_3v: + mnemonic 
= "st1"; + form = form_3v; + break; + case NEON_ST1_4v: + mnemonic = "st1"; + form = form_4v; + break; + case NEON_ST2: + mnemonic = "st2"; + form = form_2v; + break; + case NEON_ST3: + mnemonic = "st3"; + form = form_3v; + break; + case NEON_ST4: + mnemonic = "st4"; + form = form_4v; + break; + default: + break; + } + + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreMultiStructMask)) { + case NEON_LD2: + case NEON_LD3: + case NEON_LD4: + case NEON_ST2: + case NEON_ST3: + case NEON_ST4: + // LD[2-4] and ST[2-4] cannot use .1d format. + allocated = (instr->GetNEONQ() != 0) || (instr->GetNEONLSSize() != 3); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreMultiStruct)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1"; + const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2"; + const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3"; + const char *form_4v = + "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD1_1v_post: + mnemonic = "ld1"; + form = form_1v; + break; + case NEON_LD1_2v_post: + mnemonic = "ld1"; + form = form_2v; + break; + case NEON_LD1_3v_post: + mnemonic = "ld1"; + form = form_3v; + break; + case NEON_LD1_4v_post: + mnemonic = "ld1"; + form = form_4v; + break; + case NEON_LD2_post: + mnemonic = "ld2"; + form = form_2v; + break; + case NEON_LD3_post: + mnemonic = "ld3"; + form = form_3v; + break; + case NEON_LD4_post: + mnemonic = "ld4"; + form = form_4v; + break; + case 
NEON_ST1_1v_post: + mnemonic = "st1"; + form = form_1v; + break; + case NEON_ST1_2v_post: + mnemonic = "st1"; + form = form_2v; + break; + case NEON_ST1_3v_post: + mnemonic = "st1"; + form = form_3v; + break; + case NEON_ST1_4v_post: + mnemonic = "st1"; + form = form_4v; + break; + case NEON_ST2_post: + mnemonic = "st2"; + form = form_2v; + break; + case NEON_ST3_post: + mnemonic = "st3"; + form = form_3v; + break; + case NEON_ST4_post: + mnemonic = "st4"; + form = form_4v; + break; + default: + break; + } + + // Work out unallocated encodings. + bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD2_post: + case NEON_LD3_post: + case NEON_LD4_post: + case NEON_ST2_post: + case NEON_ST3_post: + case NEON_ST4_post: + // LD[2-4] and ST[2-4] cannot use .1d format. + allocated = (instr->GetNEONQ() != 0) || (instr->GetNEONLSSize() != 3); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreMultiStructPostIndex)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + + const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]"; + const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]"; + const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]"; + const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructMask)) { + case NEON_LD1_b: + mnemonic = "ld1"; + form = form_1b; + break; + case NEON_LD1_h: + mnemonic = "ld1"; + form = form_1h; + break; + case NEON_LD1_s: + mnemonic = "ld1"; + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? 
form_1s : form_1d; + break; + case NEON_ST1_b: + mnemonic = "st1"; + form = form_1b; + break; + case NEON_ST1_h: + mnemonic = "st1"; + form = form_1h; + break; + case NEON_ST1_s: + mnemonic = "st1"; + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns]"; + break; + case NEON_LD2_b: + case NEON_ST2_b: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD2_h: + case NEON_ST2_h: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD2_s: + case NEON_ST2_s: + VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d); + VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d); + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + if ((instr->GetNEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD2R: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns]"; + break; + case NEON_LD3_b: + case NEON_ST3_b: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD3_h: + case NEON_ST3_h: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD3_s: + case NEON_ST3_s: + mnemonic = (instr->GetLdStXLoad() == 1) ? 
"ld3" : "st3"; + if ((instr->GetNEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD3R: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]"; + break; + case NEON_LD4_b: + case NEON_ST4_b: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]"; + break; + case NEON_LD4_h: + case NEON_ST4_h: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]"; + break; + case NEON_LD4_s: + case NEON_ST4_s: + VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d); + VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d); + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + if ((instr->GetNEONLSSize() & 1) == 0) { + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]"; + } else { + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]"; + } + break; + case NEON_LD4R: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]"; + break; + default: + break; + } + + // Work out unallocated encodings. 
+ bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreSingleStructMask)) { + case NEON_LD1_h: + case NEON_LD2_h: + case NEON_LD3_h: + case NEON_LD4_h: + case NEON_ST1_h: + case NEON_ST2_h: + case NEON_ST3_h: + case NEON_ST4_h: + VIXL_ASSERT(allocated); + allocated = ((instr->GetNEONLSSize() & 1) == 0); + break; + case NEON_LD1_s: + case NEON_LD2_s: + case NEON_LD3_s: + case NEON_LD4_s: + case NEON_ST1_s: + case NEON_ST2_s: + case NEON_ST3_s: + case NEON_ST4_s: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONLSSize() <= 1) && + ((instr->GetNEONLSSize() == 0) || (instr->GetNEONS() == 0)); + break; + case NEON_LD1R: + case NEON_LD2R: + case NEON_LD3R: + case NEON_LD4R: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONS() == 0); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreSingleStruct)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = NULL; + + const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1"; + const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2"; + const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4"; + const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_b_post: + mnemonic = "ld1"; + form = form_1b; + break; + case NEON_LD1_h_post: + mnemonic = "ld1"; + form = form_1h; + break; + case NEON_LD1_s_post: + mnemonic = "ld1"; + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? 
form_1s : form_1d; + break; + case NEON_ST1_b_post: + mnemonic = "st1"; + form = form_1b; + break; + case NEON_ST1_h_post: + mnemonic = "st1"; + form = form_1h; + break; + case NEON_ST1_s_post: + mnemonic = "st1"; + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + form = ((instr->GetNEONLSSize() & 1) == 0) ? form_1s : form_1d; + break; + case NEON_LD1R_post: + mnemonic = "ld1r"; + form = "{'Vt.%s}, ['Xns], 'Xmz1"; + break; + case NEON_LD2_b_post: + case NEON_ST2_b_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2"; + break; + case NEON_ST2_h_post: + case NEON_LD2_h_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4"; + break; + case NEON_LD2_s_post: + case NEON_ST2_s_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld2" : "st2"; + if ((instr->GetNEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8"; + else + form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16"; + break; + case NEON_LD2R_post: + mnemonic = "ld2r"; + form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2"; + break; + case NEON_LD3_b_post: + case NEON_ST3_b_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3"; + break; + case NEON_LD3_h_post: + case NEON_ST3_h_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld3" : "st3"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6"; + break; + case NEON_LD3_s_post: + case NEON_ST3_s_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? 
"ld3" : "st3"; + if ((instr->GetNEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmb24"; + break; + case NEON_LD3R_post: + mnemonic = "ld3r"; + form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3"; + break; + case NEON_LD4_b_post: + case NEON_ST4_b_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4"; + break; + case NEON_LD4_h_post: + case NEON_ST4_h_post: + mnemonic = (instr->GetLdStXLoad()) == 1 ? "ld4" : "st4"; + form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8"; + break; + case NEON_LD4_s_post: + case NEON_ST4_s_post: + mnemonic = (instr->GetLdStXLoad() == 1) ? "ld4" : "st4"; + if ((instr->GetNEONLSSize() & 1) == 0) + form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16"; + else + form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32"; + break; + case NEON_LD4R_post: + mnemonic = "ld4r"; + form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4"; + break; + default: + break; + } + + // Work out unallocated encodings. 
+ bool allocated = (mnemonic != NULL); + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_h_post: + case NEON_LD2_h_post: + case NEON_LD3_h_post: + case NEON_LD4_h_post: + case NEON_ST1_h_post: + case NEON_ST2_h_post: + case NEON_ST3_h_post: + case NEON_ST4_h_post: + VIXL_ASSERT(allocated); + allocated = ((instr->GetNEONLSSize() & 1) == 0); + break; + case NEON_LD1_s_post: + case NEON_LD2_s_post: + case NEON_LD3_s_post: + case NEON_LD4_s_post: + case NEON_ST1_s_post: + case NEON_ST2_s_post: + case NEON_ST3_s_post: + case NEON_ST4_s_post: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONLSSize() <= 1) && + ((instr->GetNEONLSSize() == 0) || (instr->GetNEONS() == 0)); + break; + case NEON_LD1R_post: + case NEON_LD2R_post: + case NEON_LD3R_post: + case NEON_LD4R_post: + VIXL_ASSERT(allocated); + allocated = (instr->GetNEONS() == 0); + break; + default: + break; + } + if (allocated) { + VIXL_ASSERT(mnemonic != NULL); + VIXL_ASSERT(form != NULL); + } else { + mnemonic = "unallocated"; + form = "(NEONLoadStoreSingleStructPostIndex)"; + } + + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONModifiedImmediate(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1"; + + int half_enc = instr->ExtractBit(11); + int cmode = instr->GetNEONCmode(); + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int q = instr->GetNEONQ(); + int op = instr->GetNEONModImmOp(); + + static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}}; + static const NEONFormatMap map_h = {{30}, {NF_4H, NF_8H}}; + static const NEONFormatMap map_s = {{30}, {NF_2S, NF_4S}}; + NEONFormatDecoder nfd(instr, &map_b); + if (cmode_3 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? 
"bic" : "orr"; + } + nfd.SetFormatMap(0, &map_s); + } else { // cmode<3> == '1'. + if (cmode_2 == 0) { + if (cmode_0 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + } else { // cmode<0> == '1'. + mnemonic = (op == 1) ? "bic" : "orr"; + } + nfd.SetFormatMap(0, &map_h); + } else { // cmode<2> == '1'. + if (cmode_1 == 0) { + mnemonic = (op == 1) ? "mvni" : "movi"; + form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2"; + nfd.SetFormatMap(0, &map_s); + } else { // cmode<1> == '1'. + if (cmode_0 == 0) { + mnemonic = "movi"; + if (op == 0) { + form = "'Vt.%s, 'IVMIImm8"; + } else { + form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm"; + } + } else { // cmode<0> == '1' + mnemonic = "fmov"; + if (half_enc == 1) { + form = "'Vt.%s, 'IVMIImmFPHalf"; + nfd.SetFormatMap(0, &map_h); + } else if (op == 0) { + form = "'Vt.%s, 'IVMIImmFPSingle"; + nfd.SetFormatMap(0, &map_s); + } else { + if (q == 1) { + form = "'Vt.2d, 'IVMIImmFPDouble"; + } else { + mnemonic = "unallocated"; + form = "(NEONModifiedImmediate)"; + } + } + } + } + } + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONScalar2RegMisc(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn"; + const char *form_0 = "%sd, %sn, #0"; + const char *form_fp0 = "%sd, %sn, #0.0"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. 
+ switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_CMGT_zero_scalar: + mnemonic = "cmgt"; + form = form_0; + break; + case NEON_CMGE_zero_scalar: + mnemonic = "cmge"; + form = form_0; + break; + case NEON_CMLE_zero_scalar: + mnemonic = "cmle"; + form = form_0; + break; + case NEON_CMLT_zero_scalar: + mnemonic = "cmlt"; + form = form_0; + break; + case NEON_CMEQ_zero_scalar: + mnemonic = "cmeq"; + form = form_0; + break; + case NEON_NEG_scalar: + mnemonic = "neg"; + break; + case NEON_SQNEG_scalar: + mnemonic = "sqneg"; + break; + case NEON_ABS_scalar: + mnemonic = "abs"; + break; + case NEON_SQABS_scalar: + mnemonic = "sqabs"; + break; + case NEON_SUQADD_scalar: + mnemonic = "suqadd"; + break; + case NEON_USQADD_scalar: + mnemonic = "usqadd"; + break; + default: + form = "(NEONScalar2RegMisc)"; + } + } else { + // These instructions all use a one bit size field, except SQXTUN, SQXTN + // and UQXTN, which use a two bit size field. + nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRSQRTE_scalar: + mnemonic = "frsqrte"; + break; + case NEON_FRECPE_scalar: + mnemonic = "frecpe"; + break; + case NEON_SCVTF_scalar: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_scalar: + mnemonic = "ucvtf"; + break; + case NEON_FCMGT_zero_scalar: + mnemonic = "fcmgt"; + form = form_fp0; + break; + case NEON_FCMGE_zero_scalar: + mnemonic = "fcmge"; + form = form_fp0; + break; + case NEON_FCMLE_zero_scalar: + mnemonic = "fcmle"; + form = form_fp0; + break; + case NEON_FCMLT_zero_scalar: + mnemonic = "fcmlt"; + form = form_fp0; + break; + case NEON_FCMEQ_zero_scalar: + mnemonic = "fcmeq"; + form = form_fp0; + break; + case NEON_FRECPX_scalar: + mnemonic = "frecpx"; + break; + case NEON_FCVTNS_scalar: + mnemonic = "fcvtns"; + break; + case NEON_FCVTNU_scalar: + mnemonic = "fcvtnu"; + break; + case NEON_FCVTPS_scalar: + mnemonic = "fcvtps"; + break; + case NEON_FCVTPU_scalar: + mnemonic = "fcvtpu"; + break; + case 
NEON_FCVTMS_scalar: + mnemonic = "fcvtms"; + break; + case NEON_FCVTMU_scalar: + mnemonic = "fcvtmu"; + break; + case NEON_FCVTZS_scalar: + mnemonic = "fcvtzs"; + break; + case NEON_FCVTZU_scalar: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTAS_scalar: + mnemonic = "fcvtas"; + break; + case NEON_FCVTAU_scalar: + mnemonic = "fcvtau"; + break; + case NEON_FCVTXN_scalar: + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + mnemonic = "fcvtxn"; + break; + default: + nfd.SetFormatMap(0, nfd.ScalarFormatMap()); + nfd.SetFormatMap(1, nfd.LongScalarFormatMap()); + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_SQXTN_scalar: + mnemonic = "sqxtn"; + break; + case NEON_UQXTN_scalar: + mnemonic = "uqxtn"; + break; + case NEON_SQXTUN_scalar: + mnemonic = "sqxtun"; + break; + default: + form = "(NEONScalar2RegMisc)"; + } + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void Disassembler::VisitNEONScalar2RegMiscFP16(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Hd, 'Hn"; + const char *form_fp0 = "'Hd, 'Hn, #0.0"; + + switch (instr->Mask(NEONScalar2RegMiscFP16Mask)) { +#define FORMAT(A, B) \ + case NEON_##A##_H_scalar: \ + mnemonic = B; \ + break; + // clang-format off + FORMAT(FCVTNS, "fcvtns") + FORMAT(FCVTMS, "fcvtms") + FORMAT(FCVTAS, "fcvtas") + FORMAT(SCVTF, "scvtf") + FORMAT(FCVTPS, "fcvtps") + FORMAT(FCVTZS, "fcvtzs") + FORMAT(FRECPE, "frecpe") + FORMAT(FRECPX, "frecpx") + FORMAT(FCVTNU, "fcvtnu") + FORMAT(FCVTMU, "fcvtmu") + FORMAT(FCVTAU, "fcvtau") + FORMAT(UCVTF, "ucvtf") + FORMAT(FCVTPU, "fcvtpu") + FORMAT(FCVTZU, "fcvtzu") + FORMAT(FRSQRTE, "frsqrte") +// clang-format on +#undef FORMAT +#define FORMAT(A, B) \ + case NEON_##A##_H_zero_scalar: \ + mnemonic = B; \ + form = form_fp0; \ + break; + FORMAT(FCMGT, "fcmgt") + FORMAT(FCMEQ, "fcmeq") + FORMAT(FCMLT, "fcmlt") + FORMAT(FCMGE, "fcmge") + FORMAT(FCMLE, "fcmle") +#undef FORMAT + + default: + VIXL_UNREACHABLE(); + } + Format(instr, 
mnemonic, form); +} + + +void Disassembler::VisitNEONScalar3Diff(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::LongScalarFormatMap(), + NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar: + mnemonic = "sqdmlal"; + break; + case NEON_SQDMLSL_scalar: + mnemonic = "sqdmlsl"; + break; + case NEON_SQDMULL_scalar: + mnemonic = "sqdmull"; + break; + default: + form = "(NEONScalar3Diff)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONScalar3Same(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + nfd.SetFormatMaps(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FACGE_scalar: + mnemonic = "facge"; + break; + case NEON_FACGT_scalar: + mnemonic = "facgt"; + break; + case NEON_FCMEQ_scalar: + mnemonic = "fcmeq"; + break; + case NEON_FCMGE_scalar: + mnemonic = "fcmge"; + break; + case NEON_FCMGT_scalar: + mnemonic = "fcmgt"; + break; + case NEON_FMULX_scalar: + mnemonic = "fmulx"; + break; + case NEON_FRECPS_scalar: + mnemonic = "frecps"; + break; + case NEON_FRSQRTS_scalar: + mnemonic = "frsqrts"; + break; + case NEON_FABD_scalar: + mnemonic = "fabd"; + break; + default: + form = "(NEONScalar3Same)"; + } + } else { + switch (instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: + mnemonic = "add"; + break; + case NEON_SUB_scalar: + mnemonic = "sub"; + break; + case NEON_CMEQ_scalar: + mnemonic = "cmeq"; + break; + case NEON_CMGE_scalar: + mnemonic = "cmge"; + break; + case NEON_CMGT_scalar: + mnemonic = "cmgt"; + break; + case NEON_CMHI_scalar: + mnemonic = "cmhi"; + break; + case NEON_CMHS_scalar: + mnemonic 
= "cmhs"; + break; + case NEON_CMTST_scalar: + mnemonic = "cmtst"; + break; + case NEON_UQADD_scalar: + mnemonic = "uqadd"; + break; + case NEON_SQADD_scalar: + mnemonic = "sqadd"; + break; + case NEON_UQSUB_scalar: + mnemonic = "uqsub"; + break; + case NEON_SQSUB_scalar: + mnemonic = "sqsub"; + break; + case NEON_USHL_scalar: + mnemonic = "ushl"; + break; + case NEON_SSHL_scalar: + mnemonic = "sshl"; + break; + case NEON_UQSHL_scalar: + mnemonic = "uqshl"; + break; + case NEON_SQSHL_scalar: + mnemonic = "sqshl"; + break; + case NEON_URSHL_scalar: + mnemonic = "urshl"; + break; + case NEON_SRSHL_scalar: + mnemonic = "srshl"; + break; + case NEON_UQRSHL_scalar: + mnemonic = "uqrshl"; + break; + case NEON_SQRSHL_scalar: + mnemonic = "sqrshl"; + break; + case NEON_SQDMULH_scalar: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_scalar: + mnemonic = "sqrdmulh"; + break; + default: + form = "(NEONScalar3Same)"; + } + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + +void Disassembler::VisitNEONScalar3SameFP16(const Instruction *instr) { + const char *mnemonic = NULL; + const char *form = "'Hd, 'Hn, 'Hm"; + + switch (instr->Mask(NEONScalar3SameFP16Mask)) { + case NEON_FABD_H_scalar: + mnemonic = "fabd"; + break; + case NEON_FMULX_H_scalar: + mnemonic = "fmulx"; + break; + case NEON_FCMEQ_H_scalar: + mnemonic = "fcmeq"; + break; + case NEON_FCMGE_H_scalar: + mnemonic = "fcmge"; + break; + case NEON_FCMGT_H_scalar: + mnemonic = "fcmgt"; + break; + case NEON_FACGE_H_scalar: + mnemonic = "facge"; + break; + case NEON_FACGT_H_scalar: + mnemonic = "facgt"; + break; + case NEON_FRECPS_H_scalar: + mnemonic = "frecps"; + break; + case NEON_FRSQRTS_H_scalar: + mnemonic = "frsqrts"; + break; + default: + VIXL_UNREACHABLE(); + } + Format(instr, mnemonic, form); +} + +void Disassembler::VisitNEONScalar3SameExtra(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, %sm"; + NEONFormatDecoder nfd(instr, 
NEONFormatDecoder::ScalarFormatMap()); + + switch (instr->Mask(NEONScalar3SameExtraMask)) { + case NEON_SQRDMLAH_scalar: + mnemonic = "sqrdmlah"; + break; + case NEON_SQRDMLSH_scalar: + mnemonic = "sqrdmlsh"; + break; + default: + form = "(NEONScalar3SameExtra)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void Disassembler::VisitNEONScalarByIndexedElement(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]"; + const char *form_half = "'Hd, 'Hn, 'Ve.h['IVByElemIndex]"; + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + bool long_instr = false; + + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQDMULL_byelement_scalar: + mnemonic = "sqdmull"; + long_instr = true; + break; + case NEON_SQDMLAL_byelement_scalar: + mnemonic = "sqdmlal"; + long_instr = true; + break; + case NEON_SQDMLSL_byelement_scalar: + mnemonic = "sqdmlsl"; + long_instr = true; + break; + case NEON_SQDMULH_byelement_scalar: + mnemonic = "sqdmulh"; + break; + case NEON_SQRDMULH_byelement_scalar: + mnemonic = "sqrdmulh"; + break; + case NEON_SQRDMLAH_byelement_scalar: + mnemonic = "sqrdmlah"; + break; + case NEON_SQRDMLSH_byelement_scalar: + mnemonic = "sqrdmlsh"; + break; + default: + nfd.SetFormatMap(0, nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMUL_byelement_scalar: + mnemonic = "fmul"; + break; + case NEON_FMLA_byelement_scalar: + mnemonic = "fmla"; + break; + case NEON_FMLS_byelement_scalar: + mnemonic = "fmls"; + break; + case NEON_FMULX_byelement_scalar: + mnemonic = "fmulx"; + break; + case NEON_FMLA_H_byelement_scalar: + mnemonic = "fmla"; + form = form_half; + break; + case NEON_FMLS_H_byelement_scalar: + mnemonic = "fmls"; + form = form_half; + break; + case NEON_FMUL_H_byelement_scalar: + mnemonic = "fmul"; + form = form_half; + break; + case NEON_FMULX_H_byelement_scalar: + mnemonic = "fmulx"; 
+ form = form_half; + break; + default: + form = "(NEONScalarByIndexedElement)"; + } + } + + if (long_instr) { + nfd.SetFormatMap(0, nfd.LongScalarFormatMap()); + } + + Format(instr, + mnemonic, + nfd.Substitute(form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat)); +} + + +void Disassembler::VisitNEONScalarCopy(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONScalarCopy)"; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); + + if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { + mnemonic = "mov"; + form = "%sd, 'Vn.%s['IVInsIndex1]"; + } + + Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat)); +} + + +void Disassembler::VisitNEONScalarPairwise(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, 'Vn.%s"; + NEONFormatMap map = {{22}, {NF_2S, NF_2D}}; + NEONFormatDecoder nfd(instr, + NEONFormatDecoder::FPScalarPairwiseFormatMap(), + &map); + + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_ADDP_scalar: + // All pairwise operations except ADDP use bit U to differentiate FP16 + // from FP32/FP64 variations. 
+ nfd.SetFormatMap(0, NEONFormatDecoder::FPScalarFormatMap()); + mnemonic = "addp"; + break; + case NEON_FADDP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FADDP_scalar: + mnemonic = "faddp"; + break; + case NEON_FMAXP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMAXP_scalar: + mnemonic = "fmaxp"; + break; + case NEON_FMAXNMP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMAXNMP_scalar: + mnemonic = "fmaxnmp"; + break; + case NEON_FMINP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMINP_scalar: + mnemonic = "fminp"; + break; + case NEON_FMINNMP_h_scalar: + form = "%sd, 'Vn.2h"; + VIXL_FALLTHROUGH(); + case NEON_FMINNMP_scalar: + mnemonic = "fminnmp"; + break; + default: + form = "(NEONScalarPairwise)"; + } + Format(instr, + mnemonic, + nfd.Substitute(form, + NEONFormatDecoder::kPlaceholder, + NEONFormatDecoder::kFormat)); +} + + +void Disassembler::VisitNEONScalarShiftImmediate(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "%sd, %sn, 'Is1"; + const char *form_2 = "%sd, %sn, 'Is2"; + + static const NEONFormatMap map_shift = {{22, 21, 20, 19}, + {NF_UNDEF, + NF_B, + NF_H, + NF_H, + NF_S, + NF_S, + NF_S, + NF_S, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D}}; + static const NEONFormatMap map_shift_narrow = + {{21, 20, 19}, {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}}; + NEONFormatDecoder nfd(instr, &map_shift); + + if (instr->GetImmNEONImmh()) { // immh has to be non-zero. 
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_FCVTZU_imm_scalar: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTZS_imm_scalar: + mnemonic = "fcvtzs"; + break; + case NEON_SCVTF_imm_scalar: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_imm_scalar: + mnemonic = "ucvtf"; + break; + case NEON_SRI_scalar: + mnemonic = "sri"; + break; + case NEON_SSHR_scalar: + mnemonic = "sshr"; + break; + case NEON_USHR_scalar: + mnemonic = "ushr"; + break; + case NEON_SRSHR_scalar: + mnemonic = "srshr"; + break; + case NEON_URSHR_scalar: + mnemonic = "urshr"; + break; + case NEON_SSRA_scalar: + mnemonic = "ssra"; + break; + case NEON_USRA_scalar: + mnemonic = "usra"; + break; + case NEON_SRSRA_scalar: + mnemonic = "srsra"; + break; + case NEON_URSRA_scalar: + mnemonic = "ursra"; + break; + case NEON_SHL_scalar: + mnemonic = "shl"; + form = form_2; + break; + case NEON_SLI_scalar: + mnemonic = "sli"; + form = form_2; + break; + case NEON_SQSHLU_scalar: + mnemonic = "sqshlu"; + form = form_2; + break; + case NEON_SQSHL_imm_scalar: + mnemonic = "sqshl"; + form = form_2; + break; + case NEON_UQSHL_imm_scalar: + mnemonic = "uqshl"; + form = form_2; + break; + case NEON_UQSHRN_scalar: + mnemonic = "uqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_UQRSHRN_scalar: + mnemonic = "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRN_scalar: + mnemonic = "sqshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRN_scalar: + mnemonic = "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQSHRUN_scalar: + mnemonic = "sqshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + case NEON_SQRSHRUN_scalar: + mnemonic = "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_narrow); + break; + default: + form = "(NEONScalarShiftImmediate)"; + } + } else { + form = "(NEONScalarShiftImmediate)"; + } + Format(instr, mnemonic, nfd.SubstitutePlaceholders(form)); +} + + +void 
Disassembler::VisitNEONShiftImmediate(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Is1"; + const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2"; + const char *form_xtl = "'Vd.%s, 'Vn.%s"; + + // 0001->8H, 001x->4S, 01xx->2D, all others undefined. + static const NEONFormatMap map_shift_ta = + {{22, 21, 20, 19}, + {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}}; + + // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, + // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. + static const NEONFormatMap map_shift_tb = + {{22, 21, 20, 19, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, + NF_8H, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, + NF_2S, NF_4S, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, + NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}}; + + NEONFormatDecoder nfd(instr, &map_shift_tb); + + if (instr->GetImmNEONImmh()) { // immh has to be non-zero. + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SQSHLU: + mnemonic = "sqshlu"; + form = form_shift_2; + break; + case NEON_SQSHL_imm: + mnemonic = "sqshl"; + form = form_shift_2; + break; + case NEON_UQSHL_imm: + mnemonic = "uqshl"; + form = form_shift_2; + break; + case NEON_SHL: + mnemonic = "shl"; + form = form_shift_2; + break; + case NEON_SLI: + mnemonic = "sli"; + form = form_shift_2; + break; + case NEON_SCVTF_imm: + mnemonic = "scvtf"; + break; + case NEON_UCVTF_imm: + mnemonic = "ucvtf"; + break; + case NEON_FCVTZU_imm: + mnemonic = "fcvtzu"; + break; + case NEON_FCVTZS_imm: + mnemonic = "fcvtzs"; + break; + case NEON_SRI: + mnemonic = "sri"; + break; + case NEON_SSHR: + mnemonic = "sshr"; + break; + case NEON_USHR: + mnemonic = "ushr"; + break; + case NEON_SRSHR: + mnemonic = "srshr"; + break; + case NEON_URSHR: + mnemonic = "urshr"; + break; + case NEON_SSRA: + mnemonic = "ssra"; + break; + case NEON_USRA: + mnemonic = "usra"; + break; + case NEON_SRSRA: + 
mnemonic = "srsra"; + break; + case NEON_URSRA: + mnemonic = "ursra"; + break; + case NEON_SHRN: + mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_RSHRN: + mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_UQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SQRSHRUN: + mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun"; + nfd.SetFormatMap(1, &map_shift_ta); + break; + case NEON_SSHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->GetImmNEONImmb() == 0 && + CountSetBits(instr->GetImmNEONImmh(), 32) == 1) { // sxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl"; + } else { // sshll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll"; + } + break; + case NEON_USHLL: + nfd.SetFormatMap(0, &map_shift_ta); + if (instr->GetImmNEONImmb() == 0 && + CountSetBits(instr->GetImmNEONImmh(), 32) == 1) { // uxtl variant. + form = form_xtl; + mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl"; + } else { // ushll variant. + form = form_shift_2; + mnemonic = instr->Mask(NEON_Q) ? 
"ushll2" : "ushll"; + } + break; + default: + form = "(NEONShiftImmediate)"; + } + } else { + form = "(NEONShiftImmediate)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitNEONTable(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "(NEONTable)"; + const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s"; + const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s"; + const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + const char form_4v[] = + "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s"; + static const NEONFormatMap map_b = {{30}, {NF_8B, NF_16B}}; + NEONFormatDecoder nfd(instr, &map_b); + + switch (instr->Mask(NEONTableMask)) { + case NEON_TBL_1v: + mnemonic = "tbl"; + form = form_1v; + break; + case NEON_TBL_2v: + mnemonic = "tbl"; + form = form_2v; + break; + case NEON_TBL_3v: + mnemonic = "tbl"; + form = form_3v; + break; + case NEON_TBL_4v: + mnemonic = "tbl"; + form = form_4v; + break; + case NEON_TBX_1v: + mnemonic = "tbx"; + form = form_1v; + break; + case NEON_TBX_2v: + mnemonic = "tbx"; + form = form_2v; + break; + case NEON_TBX_3v: + mnemonic = "tbx"; + form = form_3v; + break; + case NEON_TBX_4v: + mnemonic = "tbx"; + form = form_4v; + break; + default: + break; + } + + char re_form[sizeof(form_4v) + 6]; + int reg_num = instr->GetRn(); + snprintf(re_form, + sizeof(re_form), + form, + (reg_num + 1) % kNumberOfVRegisters, + (reg_num + 2) % kNumberOfVRegisters, + (reg_num + 3) % kNumberOfVRegisters); + + Format(instr, mnemonic, nfd.Substitute(re_form)); +} + + +void Disassembler::VisitNEONPerm(const Instruction *instr) { + const char *mnemonic = "unimplemented"; + const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s"; + NEONFormatDecoder nfd(instr); + + switch (instr->Mask(NEONPermMask)) { + case NEON_TRN1: + mnemonic = "trn1"; + break; + case NEON_TRN2: + mnemonic = "trn2"; + break; + case NEON_UZP1: + mnemonic = "uzp1"; + break; + case NEON_UZP2: + 
mnemonic = "uzp2"; + break; + case NEON_ZIP1: + mnemonic = "zip1"; + break; + case NEON_ZIP2: + mnemonic = "zip2"; + break; + default: + form = "(NEONPerm)"; + } + Format(instr, mnemonic, nfd.Substitute(form)); +} + + +void Disassembler::VisitUnimplemented(const Instruction *instr) { + Format(instr, "unimplemented", "(Unimplemented)"); +} + + +void Disassembler::VisitUnallocated(const Instruction *instr) { + Format(instr, "unallocated", "(Unallocated)"); +} + + +void Disassembler::ProcessOutput(const Instruction * /*instr*/) { + // The base disasm does nothing more than disassembling into a buffer. +} + + +void Disassembler::AppendRegisterNameToOutput(const Instruction *instr, + const CPURegister ®) { + USE(instr); + VIXL_ASSERT(reg.IsValid()); + char reg_char; + + if (reg.IsRegister()) { + reg_char = reg.Is64Bits() ? 'x' : 'w'; + } else { + VIXL_ASSERT(reg.IsVRegister()); + switch (reg.GetSizeInBits()) { + case kBRegSize: + reg_char = 'b'; + break; + case kHRegSize: + reg_char = 'h'; + break; + case kSRegSize: + reg_char = 's'; + break; + case kDRegSize: + reg_char = 'd'; + break; + default: + VIXL_ASSERT(reg.Is128Bits()); + reg_char = 'q'; + } + } + + if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) { + // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31. + AppendToOutput("%c%d", reg_char, reg.GetCode()); + } else if (reg.Aliases(sp)) { + // Disassemble w31/x31 as stack pointer wsp/sp. + AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp"); + } else { + // Disassemble w31/x31 as zero register wzr/xzr. + AppendToOutput("%czr", reg_char); + } +} + + +void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction *instr, + int64_t offset) { + USE(instr); + if (offset < 0) { + // Cast to uint64_t so that INT64_MIN is handled in a well-defined way. 
+ uint64_t abs_offset = -static_cast(offset); + AppendToOutput("#-0x%" PRIx64, abs_offset); + } else { + AppendToOutput("#+0x%" PRIx64, offset); + } +} + + +void Disassembler::AppendAddressToOutput(const Instruction *instr, + const void *addr) { + USE(instr); + AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast(addr)); +} + + +void Disassembler::AppendCodeAddressToOutput(const Instruction *instr, + const void *addr) { + AppendAddressToOutput(instr, addr); +} + + +void Disassembler::AppendDataAddressToOutput(const Instruction *instr, + const void *addr) { + AppendAddressToOutput(instr, addr); +} + + +void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction *instr, + const void *addr) { + USE(instr); + int64_t rel_addr = CodeRelativeAddress(addr); + if (rel_addr >= 0) { + AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr); + } else { + AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr); + } +} + + +void Disassembler::AppendCodeRelativeCodeAddressToOutput( + const Instruction *instr, const void *addr) { + AppendCodeRelativeAddressToOutput(instr, addr); +} + + +void Disassembler::AppendCodeRelativeDataAddressToOutput( + const Instruction *instr, const void *addr) { + AppendCodeRelativeAddressToOutput(instr, addr); +} + + +void Disassembler::MapCodeAddress(int64_t base_address, + const Instruction *instr_address) { + set_code_address_offset(base_address - + reinterpret_cast(instr_address)); +} +int64_t Disassembler::CodeRelativeAddress(const void *addr) { + return reinterpret_cast(addr) + code_address_offset(); +} + + +void Disassembler::Format(const Instruction *instr, + const char *mnemonic, + const char *format) { + VIXL_ASSERT(mnemonic != NULL); + ResetOutput(); + Substitute(instr, mnemonic); + if (format != NULL) { + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_++] = ' '; + Substitute(instr, format); + } + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_] = 0; + ProcessOutput(instr); +} + + +void 
Disassembler::Substitute(const Instruction *instr, const char *string) { + char chr = *string++; + while (chr != '\0') { + if (chr == '\'') { + string += SubstituteField(instr, string); + } else { + VIXL_ASSERT(buffer_pos_ < buffer_size_); + buffer_[buffer_pos_++] = chr; + } + chr = *string++; + } +} + + +int Disassembler::SubstituteField(const Instruction *instr, + const char *format) { + switch (format[0]) { + // NB. The remaining substitution prefix characters are: GJKUZ. + case 'R': // Register. X or W, selected by sf bit. + case 'F': // FP register. S or D, selected by type field. + case 'V': // Vector register, V, vector format. + case 'W': + case 'X': + case 'B': + case 'H': + case 'S': + case 'D': + case 'Q': + return SubstituteRegisterField(instr, format); + case 'I': + return SubstituteImmediateField(instr, format); + case 'L': + return SubstituteLiteralField(instr, format); + case 'N': + return SubstituteShiftField(instr, format); + case 'P': + return SubstitutePrefetchField(instr, format); + case 'C': + return SubstituteConditionField(instr, format); + case 'E': + return SubstituteExtendField(instr, format); + case 'A': + return SubstitutePCRelAddressField(instr, format); + case 'T': + return SubstituteBranchTargetField(instr, format); + case 'O': + return SubstituteLSRegOffsetField(instr, format); + case 'M': + return SubstituteBarrierField(instr, format); + case 'K': + return SubstituteCrField(instr, format); + case 'G': + return SubstituteSysOpField(instr, format); + default: { + VIXL_UNREACHABLE(); + return 1; + } + } +} + + +int Disassembler::SubstituteRegisterField(const Instruction *instr, + const char *format) { + char reg_prefix = format[0]; + unsigned reg_num = 0; + unsigned field_len = 2; + + switch (format[1]) { + case 'd': + reg_num = instr->GetRd(); + if (format[2] == 'q') { + reg_prefix = instr->GetNEONQ() ? 
'X' : 'W'; + field_len = 3; + } + break; + case 'n': + reg_num = instr->GetRn(); + break; + case 'm': + reg_num = instr->GetRm(); + switch (format[2]) { + // Handle registers tagged with b (bytes), z (instruction), or + // r (registers), used for address updates in + // NEON load/store instructions. + case 'r': + case 'b': + case 'z': { + field_len = 3; + char *eimm; + int imm = static_cast(strtol(&format[3], &eimm, 10)); + field_len += eimm - &format[3]; + if (reg_num == 31) { + switch (format[2]) { + case 'z': + imm *= (1 << instr->GetNEONLSSize()); + break; + case 'r': + imm *= (instr->GetNEONQ() == 0) ? kDRegSizeInBytes + : kQRegSizeInBytes; + break; + case 'b': + break; + } + AppendToOutput("#%d", imm); + return field_len; + } + break; + } + } + break; + case 'e': + // This is register Rm, but using a 4-bit specifier. Used in NEON + // by-element instructions. + reg_num = instr->GetRmLow16(); + break; + case 'a': + reg_num = instr->GetRa(); + break; + case 's': + reg_num = instr->GetRs(); + break; + case 't': + reg_num = instr->GetRt(); + if (format[0] == 'V') { + if ((format[2] >= '2') && (format[2] <= '4')) { + // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4. + reg_num = (reg_num + format[2] - '1') % 32; + field_len = 3; + } + } else { + if (format[2] == '2') { + // Handle register specifier Rt2. + reg_num = instr->GetRt2(); + field_len = 3; + } + } + break; + case '(': { + switch (format[2]) { + case 's': + reg_num = instr->GetRs(); + break; + case 't': + reg_num = instr->GetRt(); + break; + default: + VIXL_UNREACHABLE(); + } + + VIXL_ASSERT(format[3] == '+'); + int i = 4; + int addition = 0; + while (format[i] != ')') { + VIXL_ASSERT((format[i] >= '0') && (format[i] <= '9')); + addition *= 10; + addition += format[i] - '0'; + ++i; + } + reg_num += addition; + field_len = i + 1; + break; + } + default: + VIXL_UNREACHABLE(); + } + + // Increase field length for registers tagged as stack. 
+ if (format[1] != '(' && format[2] == 's') { + field_len = 3; + } + + CPURegister::RegisterType reg_type = CPURegister::kRegister; + unsigned reg_size = kXRegSize; + + switch (reg_prefix) { + case 'R': + reg_prefix = instr->GetSixtyFourBits() ? 'X' : 'W'; + break; + case 'F': + switch (instr->GetFPType()) { + case 3: + reg_prefix = 'H'; + break; + case 0: + reg_prefix = 'S'; + break; + default: + reg_prefix = 'D'; + } + } + + switch (reg_prefix) { + case 'W': + reg_type = CPURegister::kRegister; + reg_size = kWRegSize; + break; + case 'X': + reg_type = CPURegister::kRegister; + reg_size = kXRegSize; + break; + case 'B': + reg_type = CPURegister::kVRegister; + reg_size = kBRegSize; + break; + case 'H': + reg_type = CPURegister::kVRegister; + reg_size = kHRegSize; + break; + case 'S': + reg_type = CPURegister::kVRegister; + reg_size = kSRegSize; + break; + case 'D': + reg_type = CPURegister::kVRegister; + reg_size = kDRegSize; + break; + case 'Q': + reg_type = CPURegister::kVRegister; + reg_size = kQRegSize; + break; + case 'V': + AppendToOutput("v%d", reg_num); + return field_len; + default: + VIXL_UNREACHABLE(); + } + + if ((reg_type == CPURegister::kRegister) && (reg_num == kZeroRegCode) && + (format[2] == 's')) { + reg_num = kSPRegInternalCode; + } + + AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type)); + + return field_len; +} + + +int Disassembler::SubstituteImmediateField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'I'); + + switch (format[1]) { + case 'M': { // IMoveImm, IMoveNeg or IMoveLSL. 
+ if (format[5] == 'L') { + AppendToOutput("#0x%" PRIx32, instr->GetImmMoveWide()); + if (instr->GetShiftMoveWide() > 0) { + AppendToOutput(", lsl #%" PRId32, 16 * instr->GetShiftMoveWide()); + } + } else { + VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N')); + uint64_t imm = static_cast(instr->GetImmMoveWide()) + << (16 * instr->GetShiftMoveWide()); + if (format[5] == 'N') imm = ~imm; + if (!instr->GetSixtyFourBits()) imm &= UINT64_C(0xffffffff); + AppendToOutput("#0x%" PRIx64, imm); + } + return 8; + } + case 'L': { + switch (format[2]) { + case 'L': { // ILLiteral - Immediate Load Literal. + AppendToOutput("pc%+" PRId32, + instr->GetImmLLiteral() * + static_cast(kLiteralEntrySize)); + return 9; + } + case 'S': { // ILS - Immediate Load/Store. + // ILSi - As above, but an index field which must not be + // omitted even if it is zero. + bool is_index = format[3] == 'i'; + if (is_index || (instr->GetImmLS() != 0)) { + AppendToOutput(", #%" PRId32, instr->GetImmLS()); + } + return is_index ? 4 : 3; + } + case 'P': { // ILPx - Immediate Load/Store Pair, x = access size. + // ILPxi - As above, but an index field which must not be + // omitted even if it is zero. + VIXL_ASSERT((format[3] >= '0') && (format[3] <= '9')); + bool is_index = format[4] == 'i'; + if (is_index || (instr->GetImmLSPair() != 0)) { + // format[3] is the scale value. Convert to a number. + int scale = 1 << (format[3] - '0'); + AppendToOutput(", #%" PRId32, instr->GetImmLSPair() * scale); + } + return is_index ? 5 : 4; + } + case 'U': { // ILU - Immediate Load/Store Unsigned. + if (instr->GetImmLSUnsigned() != 0) { + int shift = instr->GetSizeLS(); + AppendToOutput(", #%" PRId32, instr->GetImmLSUnsigned() << shift); + } + return 3; + } + case 'F': { // ILF(CNR) - Immediate Rotation Value for Complex Numbers + AppendToOutput("#%" PRId32, instr->GetImmRotFcmlaSca() * 90); + return strlen("ILFCNR"); + } + case 'A': { // ILA - Immediate Load with pointer authentication. 
+ if (instr->GetImmLSPAC() != 0) { + AppendToOutput(", #%" PRId32, instr->GetImmLSPAC()); + } + return 3; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'C': { // ICondB - Immediate Conditional Branch. + int64_t offset = instr->GetImmCondBranch() << 2; + AppendPCRelativeOffsetToOutput(instr, offset); + return 6; + } + case 'A': { // IAddSub. + VIXL_ASSERT(instr->GetShiftAddSub() <= 1); + int64_t imm = instr->GetImmAddSub() << (12 * instr->GetShiftAddSub()); + AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm); + return 7; + } + case 'F': { // IFPHalf, IFPSingle, IFPDouble, or IFPFBits. + if (format[3] == 'F') { // IFPFbits. + AppendToOutput("#%" PRId32, 64 - instr->GetFPScale()); + return 8; + } else { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmFP(), + format[3] == 'H' + ? FPToFloat(instr->GetImmFP16(), kIgnoreDefaultNaN) + : (format[3] == 'S') ? instr->GetImmFP32() + : instr->GetImmFP64()); + if (format[3] == 'H') { + return 7; + } else { + return 9; + } + } + } + case 'H': { // IH - ImmHint + AppendToOutput("#%" PRId32, instr->GetImmHint()); + return 2; + } + case 'T': { // ITri - Immediate Triangular Encoded. + AppendToOutput("#0x%" PRIx64, instr->GetImmLogical()); + return 4; + } + case 'N': { // INzcv. + int nzcv = (instr->GetNzcv() << Flags_offset); + AppendToOutput("#%c%c%c%c", + ((nzcv & NFlag) == 0) ? 'n' : 'N', + ((nzcv & ZFlag) == 0) ? 'z' : 'Z', + ((nzcv & CFlag) == 0) ? 'c' : 'C', + ((nzcv & VFlag) == 0) ? 'v' : 'V'); + return 5; + } + case 'P': { // IP - Conditional compare. + AppendToOutput("#%" PRId32, instr->GetImmCondCmp()); + return 2; + } + case 'B': { // Bitfields. + return SubstituteBitfieldImmediateField(instr, format); + } + case 'E': { // IExtract. + AppendToOutput("#%" PRId32, instr->GetImmS()); + return 8; + } + case 'S': { // IS - Test and branch bit. 
+ AppendToOutput("#%" PRId32, + (instr->GetImmTestBranchBit5() << 5) | + instr->GetImmTestBranchBit40()); + return 2; + } + case 's': { // Is - Shift (immediate). + switch (format[2]) { + case '1': { // Is1 - SSHR. + int shift = 16 << HighestSetBitPosition(instr->GetImmNEONImmh()); + shift -= instr->GetImmNEONImmhImmb(); + AppendToOutput("#%d", shift); + return 3; + } + case '2': { // Is2 - SLI. + int shift = instr->GetImmNEONImmhImmb(); + shift -= 8 << HighestSetBitPosition(instr->GetImmNEONImmh()); + AppendToOutput("#%d", shift); + return 3; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'D': { // IDebug - HLT and BRK instructions. + AppendToOutput("#0x%" PRIx32, instr->GetImmException()); + return 6; + } + case 'V': { // Immediate Vector. + switch (format[2]) { + case 'F': { + switch (format[5]) { + // Convert 'rot' bit encodings into equivalent angle rotation + case 'A': + AppendToOutput("#%" PRId32, + instr->GetImmRotFcadd() == 1 ? 270 : 90); + break; + case 'M': + AppendToOutput("#%" PRId32, instr->GetImmRotFcmlaVec() * 90); + break; + } + return strlen("IVFCN") + 1; + } + case 'E': { // IVExtract. + AppendToOutput("#%" PRId32, instr->GetImmNEONExt()); + return 9; + } + case 'B': { // IVByElemIndex. + int ret = strlen("IVByElemIndex"); + int vm_index = (instr->GetNEONH() << 1) | instr->GetNEONL(); + static const char *format_rot = "IVByElemIndexRot"; + static const char *format_fhm = "IVByElemIndexFHM"; + bool is_fhm = strncmp(format, format_fhm, strlen(format_fhm)) == 0; + if (strncmp(format, format_rot, strlen(format_rot)) == 0) { + // FCMLA uses 'H' bit index when SIZE is 2, else H:L + if (instr->GetNEONSize() == 2) { + vm_index = instr->GetNEONH(); + } + ret = static_cast(strlen(format_rot)); + } else if (is_fhm || (instr->GetNEONSize() == 0)) { + // Half-precision FP ops use H:L:M bit index + // Widening operations with H-sized operands also use H:L:M. 
+ vm_index = (instr->GetNEONH() << 2) | (instr->GetNEONL() << 1) | + instr->GetNEONM(); + if (is_fhm) ret = static_cast(strlen(format_fhm)); + } else if (instr->GetNEONSize() == 1) { + vm_index = (vm_index << 1) | instr->GetNEONM(); + } + AppendToOutput("%d", vm_index); + return ret; + } + case 'I': { // INS element. + if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) { + unsigned rd_index, rn_index; + unsigned imm5 = instr->GetImmNEON5(); + unsigned imm4 = instr->GetImmNEON4(); + int tz = CountTrailingZeros(imm5, 32); + if (tz <= 3) { // Defined for tz = 0 to 3 only. + rd_index = imm5 >> (tz + 1); + rn_index = imm4 >> tz; + if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) { + AppendToOutput("%d", rd_index); + return strlen("IVInsIndex1"); + } else if (strncmp(format, + "IVInsIndex2", + strlen("IVInsIndex2")) == 0) { + AppendToOutput("%d", rn_index); + return strlen("IVInsIndex2"); + } + } + return 0; + } + VIXL_FALLTHROUGH(); + } + case 'L': { // IVLSLane[0123] - suffix indicates access size shift. + AppendToOutput("%d", instr->GetNEONLSIndex(format[8] - '0')); + return 9; + } + case 'M': { // Modified Immediate cases. 
+ if (strncmp(format, "IVMIImmFPHalf", strlen("IVMIImmFPHalf")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmNEONabcdefgh(), + FPToFloat(instr->GetImmNEONFP16(), + kIgnoreDefaultNaN)); + return strlen("IVMIImmFPHalf"); + } else if (strncmp(format, + "IVMIImmFPSingle", + strlen("IVMIImmFPSingle")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmNEONabcdefgh(), + instr->GetImmNEONFP32()); + return strlen("IVMIImmFPSingle"); + } else if (strncmp(format, + "IVMIImmFPDouble", + strlen("IVMIImmFPDouble")) == 0) { + AppendToOutput("#0x%" PRIx32 " (%.4f)", + instr->GetImmNEONabcdefgh(), + instr->GetImmNEONFP64()); + return strlen("IVMIImmFPDouble"); + } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) { + uint64_t imm8 = instr->GetImmNEONabcdefgh(); + AppendToOutput("#0x%" PRIx64, imm8); + return strlen("IVMIImm8"); + } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) { + uint64_t imm8 = instr->GetImmNEONabcdefgh(); + uint64_t imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + AppendToOutput("#0x%" PRIx64, imm); + return strlen("IVMIImm"); + } else if (strncmp(format, + "IVMIShiftAmt1", + strlen("IVMIShiftAmt1")) == 0) { + int cmode = instr->GetNEONCmode(); + int shift_amount = 8 * ((cmode >> 1) & 3); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt1"); + } else if (strncmp(format, + "IVMIShiftAmt2", + strlen("IVMIShiftAmt2")) == 0) { + int cmode = instr->GetNEONCmode(); + int shift_amount = 8 << (cmode & 1); + AppendToOutput("#%d", shift_amount); + return strlen("IVMIShiftAmt2"); + } else { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + case 'X': { // IX - CLREX instruction. + AppendToOutput("#0x%" PRIx32, instr->GetCRm()); + return 2; + } + case 'Y': { // IY - system register immediate. 
+ switch (instr->GetImmSystemRegister()) { + case NZCV: + AppendToOutput("nzcv"); + break; + case FPCR: + AppendToOutput("fpcr"); + break; + default: + AppendToOutput("S%d_%d_c%d_c%d_%d", + instr->GetSysOp0(), + instr->GetSysOp1(), + instr->GetCRn(), + instr->GetCRm(), + instr->GetSysOp2()); + break; + } + return 2; + } + case 'R': { // IR - Rotate right into flags. + switch (format[2]) { + case 'r': { // IRr - Rotate amount. + AppendToOutput("#%d", instr->GetImmRMIFRotation()); + return 3; + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } + } + default: { + VIXL_UNIMPLEMENTED(); + return 0; + } + } +} + + +int Disassembler::SubstituteBitfieldImmediateField(const Instruction *instr, + const char *format) { + VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B')); + unsigned r = instr->GetImmR(); + unsigned s = instr->GetImmS(); + + switch (format[2]) { + case 'r': { // IBr. + AppendToOutput("#%d", r); + return 3; + } + case 's': { // IBs+1 or IBs-r+1. + if (format[3] == '+') { + AppendToOutput("#%d", s + 1); + return 5; + } else { + VIXL_ASSERT(format[3] == '-'); + AppendToOutput("#%d", s - r + 1); + return 7; + } + } + case 'Z': { // IBZ-r. + VIXL_ASSERT((format[3] == '-') && (format[4] == 'r')); + unsigned reg_size = + (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize; + AppendToOutput("#%d", reg_size - r); + return 5; + } + default: { + VIXL_UNREACHABLE(); + return 0; + } + } +} + + +int Disassembler::SubstituteLiteralField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(strncmp(format, "LValue", 6) == 0); + USE(format); + + const void *address = instr->GetLiteralAddress(); + switch (instr->Mask(LoadLiteralMask)) { + case LDR_w_lit: + case LDR_x_lit: + case LDRSW_x_lit: + case LDR_s_lit: + case LDR_d_lit: + case LDR_q_lit: + AppendCodeRelativeDataAddressToOutput(instr, address); + break; + case PRFM_lit: { + // Use the prefetch hint to decide how to print the address. 
+ switch (instr->GetPrefetchHint()) { + case 0x0: // PLD: prefetch for load. + case 0x2: // PST: prepare for store. + AppendCodeRelativeDataAddressToOutput(instr, address); + break; + case 0x1: // PLI: preload instructions. + AppendCodeRelativeCodeAddressToOutput(instr, address); + break; + case 0x3: // Unallocated hint. + AppendCodeRelativeAddressToOutput(instr, address); + break; + } + break; + } + default: + VIXL_UNREACHABLE(); + } + + return 6; +} + + +int Disassembler::SubstituteShiftField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'N'); + VIXL_ASSERT(instr->GetShiftDP() <= 0x3); + + switch (format[1]) { + case 'D': { // HDP. + VIXL_ASSERT(instr->GetShiftDP() != ROR); + VIXL_FALLTHROUGH(); + } + case 'L': { // HLo. + if (instr->GetImmDPShift() != 0) { + const char *shift_type[] = {"lsl", "lsr", "asr", "ror"}; + AppendToOutput(", %s #%" PRId32, + shift_type[instr->GetShiftDP()], + instr->GetImmDPShift()); + } + return 3; + } + default: + VIXL_UNIMPLEMENTED(); + return 0; + } +} + + +int Disassembler::SubstituteConditionField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'C'); + const char *condition_code[] = {"eq", + "ne", + "hs", + "lo", + "mi", + "pl", + "vs", + "vc", + "hi", + "ls", + "ge", + "lt", + "gt", + "le", + "al", + "nv"}; + int cond; + switch (format[1]) { + case 'B': + cond = instr->GetConditionBranch(); + break; + case 'I': { + cond = InvertCondition(static_cast(instr->GetCondition())); + break; + } + default: + cond = instr->GetCondition(); + } + AppendToOutput("%s", condition_code[cond]); + return 4; +} + + +int Disassembler::SubstitutePCRelAddressField(const Instruction *instr, + const char *format) { + VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`. + (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`. 
+ + int64_t offset = instr->GetImmPCRel(); + + // Compute the target address based on the effective address (after applying + // code_address_offset). This is required for correct behaviour of adrp. + const Instruction *base = instr + code_address_offset(); + if (format[9] == 'P') { + offset *= kPageSize; + base = AlignDown(base, kPageSize); + } + // Strip code_address_offset before printing, so we can use the + // semantically-correct AppendCodeRelativeAddressToOutput. + const void *target = + reinterpret_cast(base + offset - code_address_offset()); + + AppendPCRelativeOffsetToOutput(instr, offset); + AppendToOutput(" "); + AppendCodeRelativeAddressToOutput(instr, target); + return 13; +} + + +int Disassembler::SubstituteBranchTargetField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(strncmp(format, "TImm", 4) == 0); + + int64_t offset = 0; + switch (format[5]) { + // BImmUncn - unconditional branch immediate. + case 'n': + offset = instr->GetImmUncondBranch(); + break; + // BImmCond - conditional branch immediate. + case 'o': + offset = instr->GetImmCondBranch(); + break; + // BImmCmpa - compare and branch immediate. + case 'm': + offset = instr->GetImmCmpBranch(); + break; + // BImmTest - test and branch immediate. 
+ case 'e': + offset = instr->GetImmTestBranch(); + break; + default: + VIXL_UNIMPLEMENTED(); + } + offset *= static_cast(kInstructionSize); + const void *target_address = reinterpret_cast(instr + offset); + VIXL_STATIC_ASSERT(sizeof(*instr) == 1); + + AppendPCRelativeOffsetToOutput(instr, offset); + AppendToOutput(" "); + AppendCodeRelativeCodeAddressToOutput(instr, target_address); + + return 8; +} + + +int Disassembler::SubstituteExtendField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(strncmp(format, "Ext", 3) == 0); + VIXL_ASSERT(instr->GetExtendMode() <= 7); + USE(format); + + const char *extend_mode[] = + {"uxtb", "uxth", "uxtw", "uxtx", "sxtb", "sxth", "sxtw", "sxtx"}; + + // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit + // registers becomes lsl. + if (((instr->GetRd() == kZeroRegCode) || (instr->GetRn() == kZeroRegCode)) && + (((instr->GetExtendMode() == UXTW) && (instr->GetSixtyFourBits() == 0)) || + (instr->GetExtendMode() == UXTX))) { + if (instr->GetImmExtendShift() > 0) { + AppendToOutput(", lsl #%" PRId32, instr->GetImmExtendShift()); + } + } else { + AppendToOutput(", %s", extend_mode[instr->GetExtendMode()]); + if (instr->GetImmExtendShift() > 0) { + AppendToOutput(" #%" PRId32, instr->GetImmExtendShift()); + } + } + return 3; +} + + +int Disassembler::SubstituteLSRegOffsetField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0); + const char *extend_mode[] = {"undefined", + "undefined", + "uxtw", + "lsl", + "undefined", + "undefined", + "sxtw", + "sxtx"}; + USE(format); + + unsigned shift = instr->GetImmShiftLS(); + Extend ext = static_cast(instr->GetExtendMode()); + char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x'; + + unsigned rm = instr->GetRm(); + if (rm == kZeroRegCode) { + AppendToOutput("%czr", reg_type); + } else { + AppendToOutput("%c%d", reg_type, rm); + } + + // Extend mode UXTX is an alias for shift mode LSL here. 
+ if (!((ext == UXTX) && (shift == 0))) { + AppendToOutput(", %s", extend_mode[ext]); + if (shift != 0) { + AppendToOutput(" #%d", instr->GetSizeLS()); + } + } + return 9; +} + + +int Disassembler::SubstitutePrefetchField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'P'); + USE(format); + + static const char *hints[] = {"ld", "li", "st"}; + static const char *stream_options[] = {"keep", "strm"}; + + unsigned hint = instr->GetPrefetchHint(); + unsigned target = instr->GetPrefetchTarget() + 1; + unsigned stream = instr->GetPrefetchStream(); + + if ((hint >= ArrayLength(hints)) || (target > 3)) { + // Unallocated prefetch operations. + int prefetch_mode = instr->GetImmPrefetchOperation(); + AppendToOutput("#0b%c%c%c%c%c", + (prefetch_mode & (1 << 4)) ? '1' : '0', + (prefetch_mode & (1 << 3)) ? '1' : '0', + (prefetch_mode & (1 << 2)) ? '1' : '0', + (prefetch_mode & (1 << 1)) ? '1' : '0', + (prefetch_mode & (1 << 0)) ? '1' : '0'); + } else { + VIXL_ASSERT(stream < ArrayLength(stream_options)); + AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]); + } + return 6; +} + +int Disassembler::SubstituteBarrierField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'M'); + USE(format); + + static const char *options[4][4] = {{"sy (0b0000)", "oshld", "oshst", "osh"}, + {"sy (0b0100)", "nshld", "nshst", "nsh"}, + {"sy (0b1000)", "ishld", "ishst", "ish"}, + {"sy (0b1100)", "ld", "st", "sy"}}; + int domain = instr->GetImmBarrierDomain(); + int type = instr->GetImmBarrierType(); + + AppendToOutput("%s", options[domain][type]); + return 1; +} + +int Disassembler::SubstituteSysOpField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'G'); + int op = -1; + switch (format[1]) { + case '1': + op = instr->GetSysOp1(); + break; + case '2': + op = instr->GetSysOp2(); + break; + default: + VIXL_UNREACHABLE(); + } + AppendToOutput("#%d", op); + return 2; +} + +int 
Disassembler::SubstituteCrField(const Instruction *instr, + const char *format) { + VIXL_ASSERT(format[0] == 'K'); + int cr = -1; + switch (format[1]) { + case 'n': + cr = instr->GetCRn(); + break; + case 'm': + cr = instr->GetCRm(); + break; + default: + VIXL_UNREACHABLE(); + } + AppendToOutput("C%d", cr); + return 2; +} + +void Disassembler::ResetOutput() { + buffer_pos_ = 0; + buffer_[buffer_pos_] = 0; +} + + +void Disassembler::AppendToOutput(const char *format, ...) { + va_list args; + va_start(args, format); + buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], + buffer_size_ - buffer_pos_, + format, + args); + va_end(args); +} + + +void PrintDisassembler::Disassemble(const Instruction *instr) { + Decoder decoder; + if (cpu_features_auditor_ != NULL) { + decoder.AppendVisitor(cpu_features_auditor_); + } + decoder.AppendVisitor(this); + decoder.Decode(instr); +} + +void PrintDisassembler::DisassembleBuffer(const Instruction *start, + const Instruction *end) { + Decoder decoder; + if (cpu_features_auditor_ != NULL) { + decoder.AppendVisitor(cpu_features_auditor_); + } + decoder.AppendVisitor(this); + decoder.Decode(start, end); +} + +void PrintDisassembler::DisassembleBuffer(const Instruction *start, + uint64_t size) { + DisassembleBuffer(start, start + size); +} + + +void PrintDisassembler::ProcessOutput(const Instruction *instr) { + int bytes_printed = fprintf(stream_, + "0x%016" PRIx64 " %08" PRIx32 "\t\t%s", + reinterpret_cast(instr), + instr->GetInstructionBits(), + GetOutput()); + if (cpu_features_auditor_ != NULL) { + CPUFeatures needs = cpu_features_auditor_->GetInstructionFeatures(); + needs.Remove(cpu_features_auditor_->GetAvailableFeatures()); + if (needs != CPUFeatures::None()) { + // Try to align annotations. This value is arbitrary, but based on looking + // good with most instructions. 
Note that, for historical reasons, the + // disassembly itself is printed with tab characters, so bytes_printed is + // _not_ equivalent to the number of occupied screen columns. However, the + // prefix before the tabs is always the same length, so the annotation + // indentation does not change from one line to the next. + const int indent_to = 70; + // Always allow some space between the instruction and the annotation. + const int min_pad = 2; + + int pad = std::max(min_pad, (indent_to - bytes_printed)); + fprintf(stream_, "%*s", pad, ""); + + std::stringstream features; + features << needs; + fprintf(stream_, + "%s%s%s", + cpu_features_prefix_, + features.str().c_str(), + cpu_features_suffix_); + } + } + fprintf(stream_, "\n"); +} + +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.h new file mode 100644 index 00000000..c650bee9 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/disasm-aarch64.h @@ -0,0 +1,217 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_DISASM_AARCH64_H +#define VIXL_AARCH64_DISASM_AARCH64_H + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "cpu-features-auditor-aarch64.h" +#include "decoder-aarch64.h" +#include "instructions-aarch64.h" +#include "operands-aarch64.h" + +namespace vixl { +namespace aarch64 { + +class Disassembler : public DecoderVisitor { + public: + Disassembler(); + Disassembler(char* text_buffer, int buffer_size); + virtual ~Disassembler(); + char* GetOutput(); + +// Declare all Visitor functions. +#define DECLARE(A) \ + virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + VISITOR_LIST(DECLARE) +#undef DECLARE + + protected: + virtual void ProcessOutput(const Instruction* instr); + + // Default output functions. The functions below implement a default way of + // printing elements in the disassembly. A sub-class can override these to + // customize the disassembly output. + + // Prints the name of a register. + // TODO: This currently doesn't allow renaming of V registers. + virtual void AppendRegisterNameToOutput(const Instruction* instr, + const CPURegister& reg); + + // Prints a PC-relative offset. 
This is used for example when disassembling + // branches to immediate offsets. + virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr, + int64_t offset); + + // Prints an address, in the general case. It can be code or data. This is + // used for example to print the target address of an ADR instruction. + virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some code. + // This is used for example to print the target address of a branch to an + // immediate offset. + // A sub-class can for example override this method to lookup the address and + // print an appropriate name. + virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr, + const void* addr); + + // Prints the address of some data. + // This is used for example to print the source address of a load literal + // instruction. + virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr, + const void* addr); + + // Same as the above, but for addresses that are not relative to the code + // buffer. They are currently not used by VIXL. + virtual void AppendAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendCodeAddressToOutput(const Instruction* instr, + const void* addr); + virtual void AppendDataAddressToOutput(const Instruction* instr, + const void* addr); + + public: + // Get/Set the offset that should be added to code addresses when printing + // code-relative addresses in the AppendCodeRelativeAddressToOutput() + // helpers. + // Below is an example of how a branch immediate instruction in memory at + // address 0xb010200 would disassemble with different offsets. 
+ // Base address | Disassembly + // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc) + // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc) + // 0xb010200 | 0x0: b #+0xcc (addr 0xcc) + void MapCodeAddress(int64_t base_address, const Instruction* instr_address); + int64_t CodeRelativeAddress(const void* instr); + + private: + void Format(const Instruction* instr, + const char* mnemonic, + const char* format); + void Substitute(const Instruction* instr, const char* string); + int SubstituteField(const Instruction* instr, const char* format); + int SubstituteRegisterField(const Instruction* instr, const char* format); + int SubstituteImmediateField(const Instruction* instr, const char* format); + int SubstituteLiteralField(const Instruction* instr, const char* format); + int SubstituteBitfieldImmediateField(const Instruction* instr, + const char* format); + int SubstituteShiftField(const Instruction* instr, const char* format); + int SubstituteExtendField(const Instruction* instr, const char* format); + int SubstituteConditionField(const Instruction* instr, const char* format); + int SubstitutePCRelAddressField(const Instruction* instr, const char* format); + int SubstituteBranchTargetField(const Instruction* instr, const char* format); + int SubstituteLSRegOffsetField(const Instruction* instr, const char* format); + int SubstitutePrefetchField(const Instruction* instr, const char* format); + int SubstituteBarrierField(const Instruction* instr, const char* format); + int SubstituteSysOpField(const Instruction* instr, const char* format); + int SubstituteCrField(const Instruction* instr, const char* format); + bool RdIsZROrSP(const Instruction* instr) const { + return (instr->GetRd() == kZeroRegCode); + } + + bool RnIsZROrSP(const Instruction* instr) const { + return (instr->GetRn() == kZeroRegCode); + } + + bool RmIsZROrSP(const Instruction* instr) const { + return (instr->GetRm() == kZeroRegCode); + } + + bool RaIsZROrSP(const Instruction* instr) const { + return 
(instr->GetRa() == kZeroRegCode); + } + + bool IsMovzMovnImm(unsigned reg_size, uint64_t value); + + int64_t code_address_offset() const { return code_address_offset_; } + + protected: + void ResetOutput(); + void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3); + + void set_code_address_offset(int64_t code_address_offset) { + code_address_offset_ = code_address_offset; + } + + char* buffer_; + uint32_t buffer_pos_; + uint32_t buffer_size_; + bool own_buffer_; + + int64_t code_address_offset_; +}; + + +class PrintDisassembler : public Disassembler { + public: + explicit PrintDisassembler(FILE* stream) + : cpu_features_auditor_(NULL), + cpu_features_prefix_("// Needs: "), + cpu_features_suffix_(""), + stream_(stream) {} + + // Convenience helpers for quick disassembly, without having to manually + // create a decoder. + void DisassembleBuffer(const Instruction* start, uint64_t size); + void DisassembleBuffer(const Instruction* start, const Instruction* end); + void Disassemble(const Instruction* instr); + + // If a CPUFeaturesAuditor is specified, it will be used to annotate + // disassembly. The CPUFeaturesAuditor is expected to visit the instructions + // _before_ the disassembler, such that the CPUFeatures information is + // available when the disassembler is called. + void RegisterCPUFeaturesAuditor(CPUFeaturesAuditor* auditor) { + cpu_features_auditor_ = auditor; + } + + // Set the prefix to appear before the CPU features annotations. + void SetCPUFeaturesPrefix(const char* prefix) { + VIXL_ASSERT(prefix != NULL); + cpu_features_prefix_ = prefix; + } + + // Set the suffix to appear after the CPU features annotations. 
+ void SetCPUFeaturesSuffix(const char* suffix) { + VIXL_ASSERT(suffix != NULL); + cpu_features_suffix_ = suffix; + } + + protected: + virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE; + + CPUFeaturesAuditor* cpu_features_auditor_; + const char* cpu_features_prefix_; + const char* cpu_features_suffix_; + + private: + FILE* stream_; +}; +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_DISASM_AARCH64_H diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.cc new file mode 100644 index 00000000..a99a0459 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.cc @@ -0,0 +1,713 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "instructions-aarch64.h" +#include "assembler-aarch64.h" + +namespace vixl { +namespace aarch64 { + +static uint64_t RepeatBitsAcrossReg(unsigned reg_size, + uint64_t value, + unsigned width) { + VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) || + (width == 32)); + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + uint64_t result = value & ((UINT64_C(1) << width) - 1); + for (unsigned i = width; i < reg_size; i *= 2) { + result |= (result << i); + } + return result; +} + + +bool Instruction::IsLoad() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) != 0; + } else { + LoadStoreOp op = static_cast(Mask(LoadStoreMask)); + switch (op) { + case LDRB_w: + case LDRH_w: + case LDR_w: + case LDR_x: + case LDRSB_w: + case LDRSB_x: + case LDRSH_w: + case LDRSH_x: + case LDRSW_x: + case LDR_b: + case LDR_h: + case LDR_s: + case LDR_d: + case LDR_q: + return true; + default: + return false; + } + } +} + + +bool Instruction::IsStore() const { + if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) { + return false; + } + + if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) { + return Mask(LoadStorePairLBit) == 0; + } else { + LoadStoreOp op = static_cast(Mask(LoadStoreMask)); + switch (op) { + case STRB_w: + case STRH_w: + case STR_w: + case STR_x: + case STR_b: + 
case STR_h: + case STR_s: + case STR_d: + case STR_q: + return true; + default: + return false; + } + } +} + + +// Logical immediates can't encode zero, so a return value of zero is used to +// indicate a failure case. Specifically, where the constraints on imm_s are +// not met. +uint64_t Instruction::GetImmLogical() const { + unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize; + int32_t n = GetBitN(); + int32_t imm_s = GetImmSetBits(); + int32_t imm_r = GetImmRotate(); + + // An integer is constructed from the n, imm_s and imm_r bits according to + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 + // bits are set. The pattern is rotated right by R, and repeated across a + // 32 or 64-bit value, depending on destination register width. 
+ // + + if (n == 1) { + if (imm_s == 0x3f) { + return 0; + } + uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1; + return RotateRight(bits, imm_r, 64); + } else { + if ((imm_s >> 1) == 0x1f) { + return 0; + } + for (int width = 0x20; width >= 0x2; width >>= 1) { + if ((imm_s & width) == 0) { + int mask = width - 1; + if ((imm_s & mask) == mask) { + return 0; + } + uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1; + return RepeatBitsAcrossReg(reg_size, + RotateRight(bits, imm_r & mask, width), + width); + } + } + } + VIXL_UNREACHABLE(); + return 0; +} + + +uint32_t Instruction::GetImmNEONabcdefgh() const { + return GetImmNEONabc() << 5 | GetImmNEONdefgh(); +} + + +Float16 Instruction::Imm8ToFloat16(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Half: aBbb.cdef.gh00.0000 (16 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint16_t bit7 = (bits >> 7) & 0x1; + uint16_t bit6 = (bits >> 6) & 0x1; + uint16_t bit5_to_0 = bits & 0x3f; + uint16_t result = (bit7 << 15) | ((4 - bit6) << 12) | (bit5_to_0 << 6); + return RawbitsToFloat16(result); +} + + +float Instruction::Imm8ToFP32(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint32_t bit7 = (bits >> 7) & 0x1; + uint32_t bit6 = (bits >> 6) & 0x1; + uint32_t bit5_to_0 = bits & 0x3f; + uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19); + + return RawbitsToFloat(result); +} + + +Float16 Instruction::GetImmFP16() const { return Imm8ToFloat16(GetImmFP()); } + + +float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); } + + +double Instruction::Imm8ToFP64(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint64_t bit7 = (bits >> 7) & 0x1; + uint64_t bit6 = (bits >> 6) & 0x1; + uint64_t bit5_to_0 = bits & 0x3f; + 
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48); + + return RawbitsToDouble(result); +} + + +double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); } + + +Float16 Instruction::GetImmNEONFP16() const { + return Imm8ToFloat16(GetImmNEONabcdefgh()); +} + + +float Instruction::GetImmNEONFP32() const { + return Imm8ToFP32(GetImmNEONabcdefgh()); +} + + +double Instruction::GetImmNEONFP64() const { + return Imm8ToFP64(GetImmNEONabcdefgh()); +} + + +unsigned CalcLSDataSize(LoadStoreOp op) { + VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8)); + unsigned size = static_cast(op) >> LSSize_offset; + if ((op & LSVector_mask) != 0) { + // Vector register memory operations encode the access size in the "size" + // and "opc" fields. + if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) { + size = kQRegSizeInBytesLog2; + } + } + return size; +} + + +unsigned CalcLSPairDataSize(LoadStorePairOp op) { + VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes); + VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes); + switch (op) { + case STP_q: + case LDP_q: + return kQRegSizeInBytesLog2; + case STP_x: + case LDP_x: + case STP_d: + case LDP_d: + return kXRegSizeInBytesLog2; + default: + return kWRegSizeInBytesLog2; + } +} + + +int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) { + switch (branch_type) { + case UncondBranchType: + return ImmUncondBranch_width; + case CondBranchType: + return ImmCondBranch_width; + case CompareBranchType: + return ImmCmpBranch_width; + case TestBranchType: + return ImmTestBranch_width; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) { + int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1); + return encoded_max * kInstructionSize; +} + + +bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type, + int64_t offset) { + return 
IsIntN(GetImmBranchRangeBitwidth(branch_type), offset); +} + + +const Instruction* Instruction::GetImmPCOffsetTarget() const { + const Instruction* base = this; + ptrdiff_t offset; + if (IsPCRelAddressing()) { + // ADR and ADRP. + offset = GetImmPCRel(); + if (Mask(PCRelAddressingMask) == ADRP) { + base = AlignDown(base, kPageSize); + offset *= kPageSize; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR); + } + } else { + // All PC-relative branches. + VIXL_ASSERT(GetBranchType() != UnknownBranchType); + // Relative branch offsets are instruction-size-aligned. + offset = GetImmBranch() * static_cast(kInstructionSize); + } + return base + offset; +} + + +int Instruction::GetImmBranch() const { + switch (GetBranchType()) { + case CondBranchType: + return GetImmCondBranch(); + case UncondBranchType: + return GetImmUncondBranch(); + case CompareBranchType: + return GetImmCmpBranch(); + case TestBranchType: + return GetImmTestBranch(); + default: + VIXL_UNREACHABLE(); + } + return 0; +} + + +void Instruction::SetImmPCOffsetTarget(const Instruction* target) { + if (IsPCRelAddressing()) { + SetPCRelImmTarget(target); + } else { + SetBranchImmTarget(target); + } +} + + +void Instruction::SetPCRelImmTarget(const Instruction* target) { + ptrdiff_t imm21; + if ((Mask(PCRelAddressingMask) == ADR)) { + imm21 = target - this; + } else { + VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP); + uintptr_t this_page = reinterpret_cast(this) / kPageSize; + uintptr_t target_page = reinterpret_cast(target) / kPageSize; + imm21 = target_page - this_page; + } + Instr imm = Assembler::ImmPCRelAddress(static_cast(imm21)); + + SetInstructionBits(Mask(~ImmPCRel_mask) | imm); +} + + +void Instruction::SetBranchImmTarget(const Instruction* target) { + VIXL_ASSERT(((target - this) & 3) == 0); + Instr branch_imm = 0; + uint32_t imm_mask = 0; + int offset = static_cast((target - this) >> kInstructionSizeLog2); + switch (GetBranchType()) { + case CondBranchType: { + branch_imm = 
Assembler::ImmCondBranch(offset); + imm_mask = ImmCondBranch_mask; + break; + } + case UncondBranchType: { + branch_imm = Assembler::ImmUncondBranch(offset); + imm_mask = ImmUncondBranch_mask; + break; + } + case CompareBranchType: { + branch_imm = Assembler::ImmCmpBranch(offset); + imm_mask = ImmCmpBranch_mask; + break; + } + case TestBranchType: { + branch_imm = Assembler::ImmTestBranch(offset); + imm_mask = ImmTestBranch_mask; + break; + } + default: + VIXL_UNREACHABLE(); + } + SetInstructionBits(Mask(~imm_mask) | branch_imm); +} + + +void Instruction::SetImmLLiteral(const Instruction* source) { + VIXL_ASSERT(IsWordAligned(source)); + ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2; + Instr imm = Assembler::ImmLLiteral(static_cast(offset)); + Instr mask = ImmLLiteral_mask; + + SetInstructionBits(Mask(~mask) | imm); +} + + +VectorFormat VectorFormatHalfWidth(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D || + vform == kFormatH || vform == kFormatS || vform == kFormatD); + switch (vform) { + case kFormat8H: + return kFormat8B; + case kFormat4S: + return kFormat4H; + case kFormat2D: + return kFormat2S; + case kFormatH: + return kFormatB; + case kFormatS: + return kFormatH; + case kFormatD: + return kFormatS; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat VectorFormatDoubleWidth(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S || + vform == kFormatB || vform == kFormatH || vform == kFormatS); + switch (vform) { + case kFormat8B: + return kFormat8H; + case kFormat4H: + return kFormat4S; + case kFormat2S: + return kFormat2D; + case kFormatB: + return kFormatH; + case kFormatH: + return kFormatS; + case kFormatS: + return kFormatD; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat VectorFormatFillQ(VectorFormat vform) { + switch (vform) { + case kFormatB: + case kFormat8B: + case 
kFormat16B: + return kFormat16B; + case kFormatH: + case kFormat4H: + case kFormat8H: + return kFormat8H; + case kFormatS: + case kFormat2S: + case kFormat4S: + return kFormat4S; + case kFormatD: + case kFormat1D: + case kFormat2D: + return kFormat2D; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + +VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) { + switch (vform) { + case kFormat4H: + return kFormat8B; + case kFormat8H: + return kFormat16B; + case kFormat2S: + return kFormat4H; + case kFormat4S: + return kFormat8H; + case kFormat1D: + return kFormat2S; + case kFormat2D: + return kFormat4S; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + +VectorFormat VectorFormatDoubleLanes(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S); + switch (vform) { + case kFormat8B: + return kFormat16B; + case kFormat4H: + return kFormat8H; + case kFormat2S: + return kFormat4S; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat VectorFormatHalfLanes(VectorFormat vform) { + VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S); + switch (vform) { + case kFormat16B: + return kFormat8B; + case kFormat8H: + return kFormat4H; + case kFormat4S: + return kFormat2S; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat ScalarFormatFromLaneSize(int laneSize) { + switch (laneSize) { + case 8: + return kFormatB; + case 16: + return kFormatH; + case 32: + return kFormatS; + case 64: + return kFormatD; + default: + VIXL_UNREACHABLE(); + return kFormatUndefined; + } +} + + +VectorFormat ScalarFormatFromFormat(VectorFormat vform) { + return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform)); +} + + +unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + return kBRegSize; + case kFormatH: + return kHRegSize; + case 
kFormatS: + case kFormat2H: + return kSRegSize; + case kFormatD: + return kDRegSize; + case kFormat8B: + case kFormat4H: + case kFormat2S: + case kFormat1D: + return kDRegSize; + default: + return kQRegSize; + } +} + + +unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) { + return RegisterSizeInBitsFromFormat(vform) / 8; +} + + +unsigned LaneSizeInBitsFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 8; + case kFormatH: + case kFormat2H: + case kFormat4H: + case kFormat8H: + return 16; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 32; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 64; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int LaneSizeInBytesFromFormat(VectorFormat vform) { + return LaneSizeInBitsFromFormat(vform) / 8; +} + + +int LaneSizeInBytesLog2FromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 0; + case kFormatH: + case kFormat2H: + case kFormat4H: + case kFormat8H: + return 1; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 2; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 3; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int LaneCountFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormat16B: + return 16; + case kFormat8B: + case kFormat8H: + return 8; + case kFormat4H: + case kFormat4S: + return 4; + case kFormat2H: + case kFormat2S: + case kFormat2D: + return 2; + case kFormat1D: + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: + return 1; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +int MaxLaneCountFromFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormat8B: + case kFormat16B: + return 16; + case 
kFormatH: + case kFormat4H: + case kFormat8H: + return 8; + case kFormatS: + case kFormat2S: + case kFormat4S: + return 4; + case kFormatD: + case kFormat1D: + case kFormat2D: + return 2; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +// Does 'vform' indicate a vector format or a scalar format? +bool IsVectorFormat(VectorFormat vform) { + VIXL_ASSERT(vform != kFormatUndefined); + switch (vform) { + case kFormatB: + case kFormatH: + case kFormatS: + case kFormatD: + return false; + default: + return true; + } +} + + +int64_t MaxIntFromFormat(VectorFormat vform) { + return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} + + +int64_t MinIntFromFormat(VectorFormat vform) { + return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform)); +} + + +uint64_t MaxUintFromFormat(VectorFormat vform) { + return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform)); +} +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.h new file mode 100644 index 00000000..759d03b3 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instructions-aarch64.h @@ -0,0 +1,896 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_ +#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_ + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "constants-aarch64.h" + +namespace vixl { +namespace aarch64 { +// ISA constants. -------------------------------------------------------------- + +typedef uint32_t Instr; +const unsigned kInstructionSize = 4; +const unsigned kInstructionSizeLog2 = 2; +const unsigned kLiteralEntrySize = 4; +const unsigned kLiteralEntrySizeLog2 = 2; +const unsigned kMaxLoadLiteralRange = 1 * MBytes; + +// This is the nominal page size (as used by the adrp instruction); the actual +// size of the memory pages allocated by the kernel is likely to differ. 
+const unsigned kPageSize = 4 * KBytes; +const unsigned kPageSizeLog2 = 12; + +const unsigned kBRegSize = 8; +const unsigned kBRegSizeLog2 = 3; +const unsigned kBRegSizeInBytes = kBRegSize / 8; +const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3; +const unsigned kHRegSize = 16; +const unsigned kHRegSizeLog2 = 4; +const unsigned kHRegSizeInBytes = kHRegSize / 8; +const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3; +const unsigned kWRegSize = 32; +const unsigned kWRegSizeLog2 = 5; +const unsigned kWRegSizeInBytes = kWRegSize / 8; +const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3; +const unsigned kXRegSize = 64; +const unsigned kXRegSizeLog2 = 6; +const unsigned kXRegSizeInBytes = kXRegSize / 8; +const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3; +const unsigned kSRegSize = 32; +const unsigned kSRegSizeLog2 = 5; +const unsigned kSRegSizeInBytes = kSRegSize / 8; +const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3; +const unsigned kDRegSize = 64; +const unsigned kDRegSizeLog2 = 6; +const unsigned kDRegSizeInBytes = kDRegSize / 8; +const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3; +const unsigned kQRegSize = 128; +const unsigned kQRegSizeLog2 = 7; +const unsigned kQRegSizeInBytes = kQRegSize / 8; +const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3; +const uint64_t kWRegMask = UINT64_C(0xffffffff); +const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kHRegMask = UINT64_C(0xffff); +const uint64_t kSRegMask = UINT64_C(0xffffffff); +const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff); +const uint64_t kSSignMask = UINT64_C(0x80000000); +const uint64_t kDSignMask = UINT64_C(0x8000000000000000); +const uint64_t kWSignMask = UINT64_C(0x80000000); +const uint64_t kXSignMask = UINT64_C(0x8000000000000000); +const uint64_t kByteMask = UINT64_C(0xff); +const uint64_t kHalfWordMask = UINT64_C(0xffff); +const uint64_t kWordMask = UINT64_C(0xffffffff); +const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff); +const 
uint64_t kWMaxUInt = UINT64_C(0xffffffff); +const uint64_t kHMaxUInt = UINT64_C(0xffff); +// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation +// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour. +const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff); +const int64_t kXMinInt = -kXMaxInt - 1; +const int32_t kWMaxInt = INT32_C(0x7fffffff); +const int32_t kWMinInt = -kWMaxInt - 1; +const int16_t kHMaxInt = INT16_C(0x7fff); +const int16_t kHMinInt = -kHMaxInt - 1; +const unsigned kFpRegCode = 29; +const unsigned kLinkRegCode = 30; +const unsigned kSpRegCode = 31; +const unsigned kZeroRegCode = 31; +const unsigned kSPRegInternalCode = 63; +const unsigned kRegCodeMask = 0x1f; + +const unsigned kAtomicAccessGranule = 16; + +const unsigned kAddressTagOffset = 56; +const unsigned kAddressTagWidth = 8; +const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1) + << kAddressTagOffset; +VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000)); + +const uint64_t kTTBRMask = UINT64_C(1) << 55; + +// Make these moved float constants backwards compatible +// with explicit vixl::aarch64:: namespace references. 
+using vixl::kDoubleMantissaBits; +using vixl::kDoubleExponentBits; +using vixl::kFloatMantissaBits; +using vixl::kFloatExponentBits; +using vixl::kFloat16MantissaBits; +using vixl::kFloat16ExponentBits; + +using vixl::kFP16PositiveInfinity; +using vixl::kFP16NegativeInfinity; +using vixl::kFP32PositiveInfinity; +using vixl::kFP32NegativeInfinity; +using vixl::kFP64PositiveInfinity; +using vixl::kFP64NegativeInfinity; + +using vixl::kFP16DefaultNaN; +using vixl::kFP32DefaultNaN; +using vixl::kFP64DefaultNaN; + +unsigned CalcLSDataSize(LoadStoreOp op); +unsigned CalcLSPairDataSize(LoadStorePairOp op); + +enum ImmBranchType { + UnknownBranchType = 0, + CondBranchType = 1, + UncondBranchType = 2, + CompareBranchType = 3, + TestBranchType = 4 +}; + +enum AddrMode { Offset, PreIndex, PostIndex }; + +enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister }; + +// Instructions. --------------------------------------------------------------- + +class Instruction { + public: + Instr GetInstructionBits() const { + return *(reinterpret_cast(this)); + } + VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) { + return GetInstructionBits(); + } + + void SetInstructionBits(Instr new_instr) { + *(reinterpret_cast(this)) = new_instr; + } + + int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; } + VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) { + return ExtractBit(pos); + } + + uint32_t ExtractBits(int msb, int lsb) const { + return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits()); + } + VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) { + return ExtractBits(msb, lsb); + } + + int32_t ExtractSignedBits(int msb, int lsb) const { + int32_t bits = *(reinterpret_cast(this)); + return ExtractSignedBitfield32(msb, lsb, bits); + } + VIXL_DEPRECATED("ExtractSignedBits", + int32_t SignedBits(int msb, int lsb) const) { + return ExtractSignedBits(msb, lsb); + } + + Instr Mask(uint32_t mask) const { + 
VIXL_ASSERT(mask != 0); + return GetInstructionBits() & mask; + } + +#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \ + int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \ + VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); } + INSTRUCTION_FIELDS_LIST(DEFINE_GETTER) +#undef DEFINE_GETTER + + // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), + // formed from ImmPCRelLo and ImmPCRelHi. + int GetImmPCRel() const { + uint32_t hi = static_cast(GetImmPCRelHi()); + uint32_t lo = GetImmPCRelLo(); + uint32_t offset = (hi << ImmPCRelLo_width) | lo; + int width = ImmPCRelLo_width + ImmPCRelHi_width; + return ExtractSignedBitfield32(width - 1, 0, offset); + } + VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); } + + // ImmLSPAC is a compound field (not present in INSTRUCTION_FIELDS_LIST), + // formed from ImmLSPACLo and ImmLSPACHi. + int GetImmLSPAC() const { + uint32_t hi = static_cast(GetImmLSPACHi()); + uint32_t lo = GetImmLSPACLo(); + uint32_t offset = (hi << ImmLSPACLo_width) | lo; + int width = ImmLSPACLo_width + ImmLSPACHi_width; + return ExtractSignedBitfield32(width - 1, 0, offset) << 3; + } + + uint64_t GetImmLogical() const; + VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) { + return GetImmLogical(); + } + + unsigned GetImmNEONabcdefgh() const; + VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) { + return GetImmNEONabcdefgh(); + } + + Float16 GetImmFP16() const; + + float GetImmFP32() const; + VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); } + + double GetImmFP64() const; + VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); } + + Float16 GetImmNEONFP16() const; + + float GetImmNEONFP32() const; + VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) { + return GetImmNEONFP32(); + } + + double GetImmNEONFP64() const; + VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() 
const) { + return GetImmNEONFP64(); + } + + unsigned GetSizeLS() const { + return CalcLSDataSize(static_cast(Mask(LoadStoreMask))); + } + VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); } + + unsigned GetSizeLSPair() const { + return CalcLSPairDataSize( + static_cast(Mask(LoadStorePairMask))); + } + VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) { + return GetSizeLSPair(); + } + + int GetNEONLSIndex(int access_size_shift) const { + int64_t q = GetNEONQ(); + int64_t s = GetNEONS(); + int64_t size = GetNEONLSSize(); + int64_t index = (q << 3) | (s << 2) | size; + return static_cast(index >> access_size_shift); + } + VIXL_DEPRECATED("GetNEONLSIndex", + int NEONLSIndex(int access_size_shift) const) { + return GetNEONLSIndex(access_size_shift); + } + + // Helpers. + bool IsCondBranchImm() const { + return Mask(ConditionalBranchFMask) == ConditionalBranchFixed; + } + + bool IsUncondBranchImm() const { + return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed; + } + + bool IsCompareBranch() const { + return Mask(CompareBranchFMask) == CompareBranchFixed; + } + + bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; } + + bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; } + + bool IsPCRelAddressing() const { + return Mask(PCRelAddressingFMask) == PCRelAddressingFixed; + } + + bool IsLogicalImmediate() const { + return Mask(LogicalImmediateFMask) == LogicalImmediateFixed; + } + + bool IsAddSubImmediate() const { + return Mask(AddSubImmediateFMask) == AddSubImmediateFixed; + } + + bool IsAddSubExtended() const { + return Mask(AddSubExtendedFMask) == AddSubExtendedFixed; + } + + bool IsLoadOrStore() const { + return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed; + } + + bool IsLoad() const; + bool IsStore() const; + + bool IsLoadLiteral() const { + // This includes PRFM_lit. 
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed; + } + + bool IsMovn() const { + return (Mask(MoveWideImmediateMask) == MOVN_x) || + (Mask(MoveWideImmediateMask) == MOVN_w); + } + + bool IsException() const { return Mask(ExceptionFMask) == ExceptionFixed; } + + bool IsPAuth() const { return Mask(SystemPAuthFMask) == SystemPAuthFixed; } + + bool IsBti() const { + if (Mask(SystemHintFMask) == SystemHintFixed) { + int imm_hint = GetImmHint(); + switch (imm_hint) { + case BTI: + case BTI_c: + case BTI_j: + case BTI_jc: + return true; + } + } + return false; + } + + static int GetImmBranchRangeBitwidth(ImmBranchType branch_type); + VIXL_DEPRECATED( + "GetImmBranchRangeBitwidth", + static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) { + return GetImmBranchRangeBitwidth(branch_type); + } + + static int32_t GetImmBranchForwardRange(ImmBranchType branch_type); + VIXL_DEPRECATED( + "GetImmBranchForwardRange", + static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) { + return GetImmBranchForwardRange(branch_type); + } + + static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset); + + // Indicate whether Rd can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rd field. + Reg31Mode GetRdMode() const { + // The following instructions use sp or wsp as Rd: + // Add/sub (immediate) when not setting the flags. + // Add/sub (extended) when not setting the flags. + // Logical (immediate) when not setting the flags. + // Otherwise, r31 is the zero register. + if (IsAddSubImmediate() || IsAddSubExtended()) { + if (Mask(AddSubSetFlagsBit)) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + if (IsLogicalImmediate()) { + // Of the logical (immediate) instructions, only ANDS (and its aliases) + // can set the flags. The others can all write into sp. 
+ // Note that some logical operations are not available to + // immediate-operand instructions, so we have to combine two masks here. + if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) { + return Reg31IsZeroRegister; + } else { + return Reg31IsStackPointer; + } + } + return Reg31IsZeroRegister; + } + VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); } + + // Indicate whether Rn can be the stack pointer or the zero register. This + // does not check that the instruction actually has an Rn field. + Reg31Mode GetRnMode() const { + // The following instructions use sp or wsp as Rn: + // All loads and stores. + // Add/sub (immediate). + // Add/sub (extended). + // Otherwise, r31 is the zero register. + if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) { + return Reg31IsStackPointer; + } + return Reg31IsZeroRegister; + } + VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); } + + ImmBranchType GetBranchType() const { + if (IsCondBranchImm()) { + return CondBranchType; + } else if (IsUncondBranchImm()) { + return UncondBranchType; + } else if (IsCompareBranch()) { + return CompareBranchType; + } else if (IsTestBranch()) { + return TestBranchType; + } else { + return UnknownBranchType; + } + } + VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) { + return GetBranchType(); + } + + // Find the target of this instruction. 'this' may be a branch or a + // PC-relative addressing instruction. + const Instruction* GetImmPCOffsetTarget() const; + VIXL_DEPRECATED("GetImmPCOffsetTarget", + const Instruction* ImmPCOffsetTarget() const) { + return GetImmPCOffsetTarget(); + } + + // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or + // a PC-relative addressing instruction. + void SetImmPCOffsetTarget(const Instruction* target); + // Patch a literal load instruction to load from 'source'. 
+ void SetImmLLiteral(const Instruction* source); + + // The range of a load literal instruction, expressed as 'instr +- range'. + // The range is actually the 'positive' range; the branch instruction can + // target [instr - range - kInstructionSize, instr + range]. + static const int kLoadLiteralImmBitwidth = 19; + static const int kLoadLiteralRange = + (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize; + + // Calculate the address of a literal referred to by a load-literal + // instruction, and return it as the specified type. + // + // The literal itself is safely mutable only if the backing buffer is safely + // mutable. + template + T GetLiteralAddress() const { + uint64_t base_raw = reinterpret_cast(this); + int64_t offset = GetImmLLiteral() * static_cast(kLiteralEntrySize); + uint64_t address_raw = base_raw + offset; + + // Cast the address using a C-style cast. A reinterpret_cast would be + // appropriate, but it can't cast one integral type to another. + T address = (T)(address_raw); + + // Assert that the address can be represented by the specified type. 
+ VIXL_ASSERT((uint64_t)(address) == address_raw); + + return address; + } + template + VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) { + return GetLiteralAddress(); + } + + uint32_t GetLiteral32() const { + uint32_t literal; + memcpy(&literal, GetLiteralAddress(), sizeof(literal)); + return literal; + } + VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) { + return GetLiteral32(); + } + + uint64_t GetLiteral64() const { + uint64_t literal; + memcpy(&literal, GetLiteralAddress(), sizeof(literal)); + return literal; + } + VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) { + return GetLiteral64(); + } + + float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); } + VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) { + return GetLiteralFP32(); + } + + double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); } + VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) { + return GetLiteralFP64(); + } + + Instruction* GetNextInstruction() { return this + kInstructionSize; } + const Instruction* GetNextInstruction() const { + return this + kInstructionSize; + } + VIXL_DEPRECATED("GetNextInstruction", + const Instruction* NextInstruction() const) { + return GetNextInstruction(); + } + + const Instruction* GetInstructionAtOffset(int64_t offset) const { + VIXL_ASSERT(IsWordAligned(this + offset)); + return this + offset; + } + VIXL_DEPRECATED("GetInstructionAtOffset", + const Instruction* InstructionAtOffset(int64_t offset) + const) { + return GetInstructionAtOffset(offset); + } + + template + static Instruction* Cast(T src) { + return reinterpret_cast(src); + } + + template + static const Instruction* CastConst(T src) { + return reinterpret_cast(src); + } + + private: + int GetImmBranch() const; + + static Float16 Imm8ToFloat16(uint32_t imm8); + static float Imm8ToFP32(uint32_t imm8); + static double Imm8ToFP64(uint32_t imm8); + + void SetPCRelImmTarget(const Instruction* target); + void 
SetBranchImmTarget(const Instruction* target); +}; + + +// Functions for handling NEON vector format information. +enum VectorFormat { + kFormatUndefined = 0xffffffff, + kFormat8B = NEON_8B, + kFormat16B = NEON_16B, + kFormat4H = NEON_4H, + kFormat8H = NEON_8H, + kFormat2S = NEON_2S, + kFormat4S = NEON_4S, + kFormat1D = NEON_1D, + kFormat2D = NEON_2D, + + // Scalar formats. We add the scalar bit to distinguish between scalar and + // vector enumerations; the bit is always set in the encoding of scalar ops + // and always clear for vector ops. Although kFormatD and kFormat1D appear + // to be the same, their meaning is subtly different. The first is a scalar + // operation, the second a vector operation that only affects one lane. + kFormatB = NEON_B | NEONScalar, + kFormatH = NEON_H | NEONScalar, + kFormatS = NEON_S | NEONScalar, + kFormatD = NEON_D | NEONScalar, + + // An artificial value, used by simulator trace tests and a few oddball + // instructions (such as FMLAL). + kFormat2H = 0xfffffffe +}; + +const int kMaxLanesPerVector = 16; + +VectorFormat VectorFormatHalfWidth(VectorFormat vform); +VectorFormat VectorFormatDoubleWidth(VectorFormat vform); +VectorFormat VectorFormatDoubleLanes(VectorFormat vform); +VectorFormat VectorFormatHalfLanes(VectorFormat vform); +VectorFormat ScalarFormatFromLaneSize(int lanesize); +VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform); +VectorFormat VectorFormatFillQ(VectorFormat vform); +VectorFormat ScalarFormatFromFormat(VectorFormat vform); +unsigned RegisterSizeInBitsFromFormat(VectorFormat vform); +unsigned RegisterSizeInBytesFromFormat(VectorFormat vform); +// TODO: Make the return types of these functions consistent. 
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform); +int LaneSizeInBytesFromFormat(VectorFormat vform); +int LaneSizeInBytesLog2FromFormat(VectorFormat vform); +int LaneCountFromFormat(VectorFormat vform); +int MaxLaneCountFromFormat(VectorFormat vform); +bool IsVectorFormat(VectorFormat vform); +int64_t MaxIntFromFormat(VectorFormat vform); +int64_t MinIntFromFormat(VectorFormat vform); +uint64_t MaxUintFromFormat(VectorFormat vform); + + +// clang-format off +enum NEONFormat { + NF_UNDEF = 0, + NF_8B = 1, + NF_16B = 2, + NF_4H = 3, + NF_8H = 4, + NF_2S = 5, + NF_4S = 6, + NF_1D = 7, + NF_2D = 8, + NF_B = 9, + NF_H = 10, + NF_S = 11, + NF_D = 12 +}; +// clang-format on + +static const unsigned kNEONFormatMaxBits = 6; + +struct NEONFormatMap { + // The bit positions in the instruction to consider. + uint8_t bits[kNEONFormatMaxBits]; + + // Mapping from concatenated bits to format. + NEONFormat map[1 << kNEONFormatMaxBits]; +}; + +class NEONFormatDecoder { + public: + enum SubstitutionMode { kPlaceholder, kFormat }; + + // Construct a format decoder with increasingly specific format maps for each + // subsitution. If no format map is specified, the default is the integer + // format map. + explicit NEONFormatDecoder(const Instruction* instr) { + instrbits_ = instr->GetInstructionBits(); + SetFormatMaps(IntegerFormatMap()); + } + NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) { + instrbits_ = instr->GetInstructionBits(); + SetFormatMaps(format); + } + NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format0, + const NEONFormatMap* format1) { + instrbits_ = instr->GetInstructionBits(); + SetFormatMaps(format0, format1); + } + NEONFormatDecoder(const Instruction* instr, + const NEONFormatMap* format0, + const NEONFormatMap* format1, + const NEONFormatMap* format2) { + instrbits_ = instr->GetInstructionBits(); + SetFormatMaps(format0, format1, format2); + } + + // Set the format mapping for all or individual substitutions. 
+ void SetFormatMaps(const NEONFormatMap* format0, + const NEONFormatMap* format1 = NULL, + const NEONFormatMap* format2 = NULL) { + VIXL_ASSERT(format0 != NULL); + formats_[0] = format0; + formats_[1] = (format1 == NULL) ? formats_[0] : format1; + formats_[2] = (format2 == NULL) ? formats_[1] : format2; + } + void SetFormatMap(unsigned index, const NEONFormatMap* format) { + VIXL_ASSERT(index <= ArrayLength(formats_)); + VIXL_ASSERT(format != NULL); + formats_[index] = format; + } + + // Substitute %s in the input string with the placeholder string for each + // register, ie. "'B", "'H", etc. + const char* SubstitutePlaceholders(const char* string) { + return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder); + } + + // Substitute %s in the input string with a new string based on the + // substitution mode. + const char* Substitute(const char* string, + SubstitutionMode mode0 = kFormat, + SubstitutionMode mode1 = kFormat, + SubstitutionMode mode2 = kFormat) { + snprintf(form_buffer_, + sizeof(form_buffer_), + string, + GetSubstitute(0, mode0), + GetSubstitute(1, mode1), + GetSubstitute(2, mode2)); + return form_buffer_; + } + + // Append a "2" to a mnemonic string based of the state of the Q bit. + const char* Mnemonic(const char* mnemonic) { + if ((instrbits_ & NEON_Q) != 0) { + snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic); + return mne_buffer_; + } + return mnemonic; + } + + VectorFormat GetVectorFormat(int format_index = 0) { + return GetVectorFormat(formats_[format_index]); + } + + VectorFormat GetVectorFormat(const NEONFormatMap* format_map) { + static const VectorFormat vform[] = {kFormatUndefined, + kFormat8B, + kFormat16B, + kFormat4H, + kFormat8H, + kFormat2S, + kFormat4S, + kFormat1D, + kFormat2D, + kFormatB, + kFormatH, + kFormatS, + kFormatD}; + VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform)); + return vform[GetNEONFormat(format_map)]; + } + + // Built in mappings for common cases. 
+ + // The integer format map uses three bits (Q, size<1:0>) to encode the + // "standard" set of NEON integer vector formats. + static const NEONFormatMap* IntegerFormatMap() { + static const NEONFormatMap map = + {{23, 22, 30}, + {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}}; + return ↦ + } + + // The long integer format map uses two bits (size<1:0>) to encode the + // long set of NEON integer vector formats. These are used in narrow, wide + // and long operations. + static const NEONFormatMap* LongIntegerFormatMap() { + static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}}; + return ↦ + } + + // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector + // formats: NF_2S, NF_4S, NF_2D. + static const NEONFormatMap* FPFormatMap() { + // The FP format map assumes two bits (Q, size<0>) are used to encode the + // NEON FP vector formats: NF_2S, NF_4S, NF_2D. + static const NEONFormatMap map = {{22, 30}, + {NF_2S, NF_4S, NF_UNDEF, NF_2D}}; + return ↦ + } + + // The FP16 format map uses one bit (Q) to encode the NEON vector format: + // NF_4H, NF_8H. + static const NEONFormatMap* FP16FormatMap() { + static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}}; + return ↦ + } + + // The load/store format map uses three bits (Q, 11, 10) to encode the + // set of NEON vector formats. + static const NEONFormatMap* LoadStoreFormatMap() { + static const NEONFormatMap map = + {{11, 10, 30}, + {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}}; + return ↦ + } + + // The logical format map uses one bit (Q) to encode the NEON vector format: + // NF_8B, NF_16B. + static const NEONFormatMap* LogicalFormatMap() { + static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}}; + return ↦ + } + + // The triangular format map uses between two and five bits to encode the NEON + // vector format: + // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H + // x1000->2S, x1001->4S, 10001->2D, all others undefined. 
+ static const NEONFormatMap* TriangularFormatMap() { + static const NEONFormatMap map = + {{19, 18, 17, 16, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, + NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, + NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, + NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}}; + return ↦ + } + + // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar + // formats: NF_B, NF_H, NF_S, NF_D. + static const NEONFormatMap* ScalarFormatMap() { + static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}}; + return ↦ + } + + // The long scalar format map uses two bits (size<1:0>) to encode the longer + // NEON scalar formats: NF_H, NF_S, NF_D. + static const NEONFormatMap* LongScalarFormatMap() { + static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}}; + return ↦ + } + + // The FP scalar format map assumes one bit (size<0>) is used to encode the + // NEON FP scalar formats: NF_S, NF_D. + static const NEONFormatMap* FPScalarFormatMap() { + static const NEONFormatMap map = {{22}, {NF_S, NF_D}}; + return ↦ + } + + // The FP scalar pairwise format map assumes two bits (U, size<0>) are used to + // encode the NEON FP scalar formats: NF_H, NF_S, NF_D. + static const NEONFormatMap* FPScalarPairwiseFormatMap() { + static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}}; + return ↦ + } + + // The triangular scalar format map uses between one and four bits to encode + // the NEON FP scalar formats: + // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined. 
+ static const NEONFormatMap* TriangularScalarFormatMap() { + static const NEONFormatMap map = {{19, 18, 17, 16}, + {NF_UNDEF, + NF_B, + NF_H, + NF_B, + NF_S, + NF_B, + NF_H, + NF_B, + NF_D, + NF_B, + NF_H, + NF_B, + NF_S, + NF_B, + NF_H, + NF_B}}; + return ↦ + } + + private: + // Get a pointer to a string that represents the format or placeholder for + // the specified substitution index, based on the format map and instruction. + const char* GetSubstitute(int index, SubstitutionMode mode) { + if (mode == kFormat) { + return NEONFormatAsString(GetNEONFormat(formats_[index])); + } + VIXL_ASSERT(mode == kPlaceholder); + return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index])); + } + + // Get the NEONFormat enumerated value for bits obtained from the + // instruction based on the specified format mapping. + NEONFormat GetNEONFormat(const NEONFormatMap* format_map) { + return format_map->map[PickBits(format_map->bits)]; + } + + // Convert a NEONFormat into a string. + static const char* NEONFormatAsString(NEONFormat format) { + // clang-format off + static const char* formats[] = { + "undefined", + "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d", + "b", "h", "s", "d" + }; + // clang-format on + VIXL_ASSERT(format < ArrayLength(formats)); + return formats[format]; + } + + // Convert a NEONFormat into a register placeholder string. + static const char* NEONFormatAsPlaceholder(NEONFormat format) { + VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) || + (format == NF_D) || (format == NF_UNDEF)); + // clang-format off + static const char* formats[] = { + "undefined", + "undefined", "undefined", "undefined", "undefined", + "undefined", "undefined", "undefined", "undefined", + "'B", "'H", "'S", "'D" + }; + // clang-format on + return formats[format]; + } + + // Select bits from instrbits_ defined by the bits array, concatenate them, + // and return the value. 
+ uint8_t PickBits(const uint8_t bits[]) { + uint8_t result = 0; + for (unsigned b = 0; b < kNEONFormatMaxBits; b++) { + if (bits[b] == 0) break; + result <<= 1; + result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1; + } + return result; + } + + Instr instrbits_; + const NEONFormatMap* formats_[3]; + char form_buffer_[64]; + char mne_buffer_[16]; +}; +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.cc new file mode 100644 index 00000000..02a1083e --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.cc @@ -0,0 +1,967 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "instrument-aarch64.h" + +namespace vixl { +namespace aarch64 { + +Counter::Counter(const char* name, CounterType type) + : count_(0), enabled_(false), type_(type) { + VIXL_ASSERT(name != NULL); + strncpy(name_, name, kCounterNameMaxLength); + // Make sure `name_` is always NULL-terminated, even if the source's length is + // higher. + name_[kCounterNameMaxLength - 1] = '\0'; +} + + +void Counter::Enable() { enabled_ = true; } + + +void Counter::Disable() { enabled_ = false; } + + +bool Counter::IsEnabled() { return enabled_; } + + +void Counter::Increment() { + if (enabled_) { + count_++; + } +} + + +uint64_t Counter::GetCount() { + uint64_t result = count_; + if (type_ == Gauge) { + // If the counter is a Gauge, reset the count after reading. 
+ count_ = 0; + } + return result; +} + + +const char* Counter::GetName() { return name_; } + + +CounterType Counter::GetType() { return type_; } + + +struct CounterDescriptor { + const char* name; + CounterType type; +}; + + +static const CounterDescriptor kCounterList[] = + {{"Instruction", Cumulative}, + + {"Move Immediate", Gauge}, + {"Add/Sub DP", Gauge}, + {"Logical DP", Gauge}, + {"Other Int DP", Gauge}, + {"FP DP", Gauge}, + + {"Conditional Select", Gauge}, + {"Conditional Compare", Gauge}, + + {"Unconditional Branch", Gauge}, + {"Compare and Branch", Gauge}, + {"Test and Branch", Gauge}, + {"Conditional Branch", Gauge}, + + {"Load Integer", Gauge}, + {"Load FP", Gauge}, + {"Load Pair", Gauge}, + {"Load Literal", Gauge}, + + {"Store Integer", Gauge}, + {"Store FP", Gauge}, + {"Store Pair", Gauge}, + + {"PC Addressing", Gauge}, + {"Other", Gauge}, + {"NEON", Gauge}, + {"Crypto", Gauge}}; + + +Instrument::Instrument(const char* datafile, uint64_t sample_period) + : output_stream_(stdout), sample_period_(sample_period) { + // Set up the output stream. If datafile is non-NULL, use that file. If it + // can't be opened, or datafile is NULL, use stdout. + if (datafile != NULL) { + output_stream_ = fopen(datafile, "w"); + if (output_stream_ == NULL) { + printf("Can't open output file %s. Using stdout.\n", datafile); + output_stream_ = stdout; + } + } + + static const int num_counters = + sizeof(kCounterList) / sizeof(CounterDescriptor); + + // Dump an instrumentation description comment at the top of the file. + fprintf(output_stream_, "# counters=%d\n", num_counters); + fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_); + + // Construct Counter objects from counter description array. 
+ for (int i = 0; i < num_counters; i++) { + Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type); + counters_.push_back(counter); + } + + DumpCounterNames(); +} + + +Instrument::~Instrument() { + // Dump any remaining instruction data to the output file. + DumpCounters(); + + // Free all the counter objects. + std::list::iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + delete *it; + } + + if (output_stream_ != stdout) { + fclose(output_stream_); + } +} + + +void Instrument::Update() { + // Increment the instruction counter, and dump all counters if a sample period + // has elapsed. + static Counter* counter = GetCounter("Instruction"); + VIXL_ASSERT(counter->GetType() == Cumulative); + counter->Increment(); + + if ((sample_period_ != 0) && counter->IsEnabled() && + (counter->GetCount() % sample_period_) == 0) { + DumpCounters(); + } +} + + +void Instrument::DumpCounters() { + // Iterate through the counter objects, dumping their values to the output + // stream. + std::list::const_iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + fprintf(output_stream_, "%" PRIu64 ",", (*it)->GetCount()); + } + fprintf(output_stream_, "\n"); + fflush(output_stream_); +} + + +void Instrument::DumpCounterNames() { + // Iterate through the counter objects, dumping the counter names to the + // output stream. + std::list::const_iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + fprintf(output_stream_, "%s,", (*it)->GetName()); + } + fprintf(output_stream_, "\n"); + fflush(output_stream_); +} + + +void Instrument::HandleInstrumentationEvent(unsigned event) { + switch (event) { + case InstrumentStateEnable: + Enable(); + break; + case InstrumentStateDisable: + Disable(); + break; + default: + DumpEventMarker(event); + } +} + + +void Instrument::DumpEventMarker(unsigned marker) { + // Dumpan event marker to the output stream as a specially formatted comment + // line. 
+ static Counter* counter = GetCounter("Instruction"); + + fprintf(output_stream_, + "# %c%c @ %" PRId64 "\n", + marker & 0xff, + (marker >> 8) & 0xff, + counter->GetCount()); +} + + +Counter* Instrument::GetCounter(const char* name) { + // Get a Counter object by name from the counter list. + std::list::const_iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + if (strcmp((*it)->GetName(), name) == 0) { + return *it; + } + } + + // A Counter by that name does not exist: print an error message to stderr + // and the output file, and exit. + static const char* error_message = + "# Error: Unknown counter \"%s\". Exiting.\n"; + fprintf(stderr, error_message, name); + fprintf(output_stream_, error_message, name); + exit(1); +} + + +void Instrument::Enable() { + std::list::iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + (*it)->Enable(); + } +} + + +void Instrument::Disable() { + std::list::iterator it; + for (it = counters_.begin(); it != counters_.end(); it++) { + (*it)->Disable(); + } +} + + +void Instrument::VisitPCRelAddressing(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("PC Addressing"); + counter->Increment(); +} + + +void Instrument::VisitAddSubImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitLogicalImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Logical DP"); + counter->Increment(); +} + + +void Instrument::VisitMoveWideImmediate(const Instruction* instr) { + Update(); + static Counter* counter = GetCounter("Move Immediate"); + + if (instr->IsMovn() && (instr->GetRd() == kZeroRegCode)) { + unsigned imm = instr->GetImmMoveWide(); + HandleInstrumentationEvent(imm); + } else { + counter->Increment(); + } +} + + +void Instrument::VisitBitfield(const Instruction* instr) { + USE(instr); + 
Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitExtract(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitUnconditionalBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Unconditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Unconditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitCompareBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Compare and Branch"); + counter->Increment(); +} + + +void Instrument::VisitTestBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Test and Branch"); + counter->Increment(); +} + + +void Instrument::VisitConditionalBranch(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Branch"); + counter->Increment(); +} + + +void Instrument::VisitSystem(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitException(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::InstrumentLoadStorePair(const Instruction* instr) { + static Counter* load_pair_counter = GetCounter("Load Pair"); + static Counter* store_pair_counter = GetCounter("Store Pair"); + + if (instr->Mask(LoadStorePairLBit) != 0) { + load_pair_counter->Increment(); + } else { + store_pair_counter->Increment(); + } +} + + +void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); 
+} + + +void Instrument::VisitLoadStorePairOffset(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) { + Update(); + InstrumentLoadStorePair(instr); +} + + +void Instrument::VisitLoadStoreExclusive(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitAtomicMemory(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitLoadLiteral(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Load Literal"); + counter->Increment(); +} + + +void Instrument::VisitLoadStorePAC(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Load Integer"); + counter->Increment(); +} + + +void Instrument::InstrumentLoadStore(const Instruction* instr) { + static Counter* load_int_counter = GetCounter("Load Integer"); + static Counter* store_int_counter = GetCounter("Store Integer"); + static Counter* load_fp_counter = GetCounter("Load FP"); + static Counter* store_fp_counter = GetCounter("Store FP"); + + switch (instr->Mask(LoadStoreMask)) { + case STRB_w: + case STRH_w: + case STR_w: + VIXL_FALLTHROUGH(); + case STR_x: + store_int_counter->Increment(); + break; + case STR_s: + VIXL_FALLTHROUGH(); + case STR_d: + store_fp_counter->Increment(); + break; + case LDRB_w: + case LDRH_w: + case LDR_w: + case LDR_x: + case LDRSB_x: + case LDRSH_x: + case LDRSW_x: + case LDRSB_w: + VIXL_FALLTHROUGH(); + case LDRSH_w: + load_int_counter->Increment(); + break; + case LDR_s: + VIXL_FALLTHROUGH(); + case LDR_d: + load_fp_counter->Increment(); + break; + } +} + + +void 
Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStorePostIndex(const Instruction* instr) { + USE(instr); + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStorePreIndex(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + +void Instrument::VisitLoadStoreRCpcUnscaledOffset(const Instruction* instr) { + Update(); + switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) { + case STLURB: + case STLURH: + case STLUR_w: + case STLUR_x: { + static Counter* counter = GetCounter("Store Integer"); + counter->Increment(); + break; + } + case LDAPURB: + case LDAPURSB_w: + case LDAPURSB_x: + case LDAPURH: + case LDAPURSH_w: + case LDAPURSH_x: + case LDAPUR_w: + case LDAPURSW: + case LDAPUR_x: { + static Counter* counter = GetCounter("Load Integer"); + counter->Increment(); + break; + } + } +} + + +void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) { + Update(); + InstrumentLoadStore(instr); +} + + +void Instrument::VisitLogicalShifted(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Logical DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubShifted(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubExtended(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitAddSubWithCarry(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Add/Sub DP"); + counter->Increment(); +} + + +void Instrument::VisitRotateRightIntoFlags(const Instruction* instr) { + USE(instr); + Update(); + 
static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitEvaluateIntoFlags(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitConditionalCompareRegister(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitConditionalSelect(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Select"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing1Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing2Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitDataProcessing3Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other Int DP"); + counter->Increment(); +} + + +void Instrument::VisitFPCompare(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPConditionalCompare(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Compare"); + counter->Increment(); +} + + +void Instrument::VisitFPConditionalSelect(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Conditional Select"); + counter->Increment(); +} + + +void Instrument::VisitFPImmediate(const Instruction* 
instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPIntegerConvert(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitFPFixedPointConvert(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("FP DP"); + counter->Increment(); +} + + +void Instrument::VisitCrypto2RegSHA(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitCrypto3RegSHA(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitCryptoAES(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Crypto"); + counter->Increment(); +} + + +void Instrument::VisitNEON2RegMisc(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON2RegMiscFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3Same(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + 
counter->Increment(); +} + + +void Instrument::VisitNEON3SameFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3SameExtra(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEON3Different(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONAcrossLanes(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONByIndexedElement(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONCopy(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONExtract(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void 
Instrument::VisitNEONModifiedImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar2RegMiscFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3Diff(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3Same(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3SameFP16(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalar3SameExtra(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarCopy(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarPairwise(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONShiftImmediate(const Instruction* 
instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONTable(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitNEONPerm(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("NEON"); + counter->Increment(); +} + + +void Instrument::VisitUnallocated(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +void Instrument::VisitUnimplemented(const Instruction* instr) { + USE(instr); + Update(); + static Counter* counter = GetCounter("Other"); + counter->Increment(); +} + + +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.h new file mode 100644 index 00000000..4401b3ea --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/instrument-aarch64.h @@ -0,0 +1,117 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_INSTRUMENT_AARCH64_H_ +#define VIXL_AARCH64_INSTRUMENT_AARCH64_H_ + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "constants-aarch64.h" +#include "decoder-aarch64.h" +#include "instrument-aarch64.h" + +namespace vixl { +namespace aarch64 { + +const int kCounterNameMaxLength = 256; +const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22; + + +enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 }; + + +enum CounterType { + Gauge = 0, // Gauge counters reset themselves after reading. + Cumulative = 1 // Cumulative counters keep their value after reading. 
+}; + + +class Counter { + public: + explicit Counter(const char* name, CounterType type = Gauge); + + void Increment(); + void Enable(); + void Disable(); + bool IsEnabled(); + uint64_t GetCount(); + VIXL_DEPRECATED("GetCount", uint64_t count()) { return GetCount(); } + + const char* GetName(); + VIXL_DEPRECATED("GetName", const char* name()) { return GetName(); } + + CounterType GetType(); + VIXL_DEPRECATED("GetType", CounterType type()) { return GetType(); } + + private: + char name_[kCounterNameMaxLength]; + uint64_t count_; + bool enabled_; + CounterType type_; +}; + + +class Instrument : public DecoderVisitor { + public: + explicit Instrument( + const char* datafile = NULL, + uint64_t sample_period = kDefaultInstrumentationSamplingPeriod); + ~Instrument(); + + void Enable(); + void Disable(); + +// Declare all Visitor functions. +#define DECLARE(A) void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + VISITOR_LIST(DECLARE) +#undef DECLARE + + private: + void Update(); + void DumpCounters(); + void DumpCounterNames(); + void DumpEventMarker(unsigned marker); + void HandleInstrumentationEvent(unsigned event); + Counter* GetCounter(const char* name); + + void InstrumentLoadStore(const Instruction* instr); + void InstrumentLoadStorePair(const Instruction* instr); + + std::list<Counter*> counters_; + + FILE* output_stream_; + + // Counter information is dumped every sample_period_ instructions decoded. + // For a sample_period_ = 0 a final counter value is only produced when the + // Instrumentation class is destroyed. 
+ uint64_t sample_period_; +}; + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_INSTRUMENT_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/logic-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/logic-aarch64.cc new file mode 100644 index 00000000..022e22f6 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/logic-aarch64.cc @@ -0,0 +1,5484 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 + +#include <cmath> + +#include "simulator-aarch64.h" + +namespace vixl { +namespace aarch64 { + +using vixl::internal::SimFloat16; + +template <typename T> +bool IsFloat64() { + return false; +} +template <> +bool IsFloat64<double>() { + return true; +} + +template <typename T> +bool IsFloat32() { + return false; +} +template <> +bool IsFloat32<float>() { + return true; +} + +template <typename T> +bool IsFloat16() { + return false; +} +template <> +bool IsFloat16<Float16>() { + return true; +} +template <> +bool IsFloat16<SimFloat16>() { + return true; +} + +template <> +double Simulator::FPDefaultNaN<double>() { + return kFP64DefaultNaN; +} + + +template <> +float Simulator::FPDefaultNaN<float>() { + return kFP32DefaultNaN; +} + + +template <> +SimFloat16 Simulator::FPDefaultNaN<SimFloat16>() { + return SimFloat16(kFP16DefaultNaN); +} + + +double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToDouble(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToDouble(src, fbits, round); + } else { + return -UFixedToDouble(-src, fbits, round); + } +} + + +double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. 
+ const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int64_t exponent = highest_significant_bit - fbits; + + return FPRoundToDouble(0, exponent, src, round); +} + + +float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToFloat(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToFloat(src, fbits, round); + } else { + return -UFixedToFloat(-src, fbits, round); + } +} + + +float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0f; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. + const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int32_t exponent = highest_significant_bit - fbits; + + return FPRoundToFloat(0, exponent, src, round); +} + + +SimFloat16 Simulator::FixedToFloat16(int64_t src, int fbits, FPRounding round) { + if (src >= 0) { + return UFixedToFloat16(src, fbits, round); + } else if (src == INT64_MIN) { + return -UFixedToFloat16(src, fbits, round); + } else { + return -UFixedToFloat16(-src, fbits, round); + } +} + + +SimFloat16 Simulator::UFixedToFloat16(uint64_t src, + int fbits, + FPRounding round) { + // An input of 0 is a special case because the result is effectively + // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit. + if (src == 0) { + return 0.0f; + } + + // Calculate the exponent. The highest significant bit will have the value + // 2^exponent. 
+ const int highest_significant_bit = 63 - CountLeadingZeros(src); + const int16_t exponent = highest_significant_bit - fbits; + + return FPRoundToFloat16(0, exponent, src, round); +} + + +void Simulator::ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + addr += LaneSizeInBytesFromFormat(vform); + } +} + + +void Simulator::ld1(VectorFormat vform, + LogicVRegister dst, + int index, + uint64_t addr) { + dst.ReadUintFromMem(vform, index, addr); +} + + +void Simulator::ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.ReadUintFromMem(vform, i, addr); + } +} + + +void Simulator::ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + addr1 += 2 * esize; + addr2 += 2 * esize; + } +} + + +void Simulator::ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); +} + + +void Simulator::ld2r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + } +} + + +void Simulator::ld3(VectorFormat vform, + 
LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + addr1 += 3 * esize; + addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + + +void Simulator::ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); +} + + +void Simulator::ld3r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + } +} + + +void Simulator::ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr1 + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int 
i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr1); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + addr1 += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + + +void Simulator::ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr1) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + dst1.ReadUintFromMem(vform, index, addr1); + dst2.ReadUintFromMem(vform, index, addr2); + dst3.ReadUintFromMem(vform, index, addr3); + dst4.ReadUintFromMem(vform, index, addr4); +} + + +void Simulator::ld4r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr) { + dst1.ClearForWrite(vform); + dst2.ClearForWrite(vform); + dst3.ClearForWrite(vform); + dst4.ClearForWrite(vform); + uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform); + uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform); + uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst1.ReadUintFromMem(vform, i, addr); + dst2.ReadUintFromMem(vform, i, addr2); + dst3.ReadUintFromMem(vform, i, addr3); + dst4.ReadUintFromMem(vform, i, addr4); + } +} + + +void Simulator::st1(VectorFormat vform, LogicVRegister src, uint64_t addr) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + src.WriteUintToMem(vform, i, addr); + addr += LaneSizeInBytesFromFormat(vform); + } +} + + +void Simulator::st1(VectorFormat vform, + LogicVRegister src, + int index, + uint64_t addr) { + src.WriteUintToMem(vform, 
index, addr); +} + + +void Simulator::st2(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + addr += 2 * esize; + addr2 += 2 * esize; + } +} + + +void Simulator::st2(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); +} + + +void Simulator::st3(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + addr += 3 * esize; + addr2 += 3 * esize; + addr3 += 3 * esize; + } +} + + +void Simulator::st3(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); +} + + +void Simulator::st4(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + uint64_t addr2 = addr + esize; + uint64_t addr3 = addr2 + esize; + uint64_t addr4 = addr3 + esize; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.WriteUintToMem(vform, i, addr); + dst2.WriteUintToMem(vform, i, addr2); + dst3.WriteUintToMem(vform, i, addr3); + 
dst4.WriteUintToMem(vform, i, addr4); + addr += 4 * esize; + addr2 += 4 * esize; + addr3 += 4 * esize; + addr4 += 4 * esize; + } +} + + +void Simulator::st4(VectorFormat vform, + LogicVRegister dst, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr) { + int esize = LaneSizeInBytesFromFormat(vform); + dst.WriteUintToMem(vform, index, addr); + dst2.WriteUintToMem(vform, index, addr + 1 * esize); + dst3.WriteUintToMem(vform, index, addr + 2 * esize); + dst4.WriteUintToMem(vform, index, addr + 3 * esize); +} + + +LogicVRegister Simulator::cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = src1.Int(vform, i); + int64_t sb = src2.Int(vform, i); + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + bool result = false; + switch (cond) { + case eq: + result = (ua == ub); + break; + case ge: + result = (sa >= sb); + break; + case gt: + result = (sa > sb); + break; + case hi: + result = (ua > ub); + break; + case hs: + result = (ua >= ub); + break; + case lt: + result = (sa < sb); + break; + case le: + result = (sa <= sb); + break; + default: + VIXL_UNREACHABLE(); + break; + } + dst.SetUint(vform, i, result ? 
MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + int imm, + Condition cond) { + SimVRegister temp; + LogicVRegister imm_reg = dup_immediate(vform, temp, imm); + return cmp(vform, dst, src1, imm_reg, cond); +} + + +LogicVRegister Simulator::cmptst(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = src1.Uint(vform, i); + uint64_t ub = src2.Uint(vform, i); + dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::add(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + int lane_size = LaneSizeInBitsFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + uint64_t ua = src1.UintLeftJustified(vform, i); + uint64_t ub = src2.UintLeftJustified(vform, i); + uint64_t ur = ua + ub; + if (ur < ua) { + dst.SetUnsignedSat(i, true); + } + + // Test for signed saturation. + bool pos_a = (ua >> 63) == 0; + bool pos_b = (ub >> 63) == 0; + bool pos_r = (ur >> 63) == 0; + // If the signs of the operands are the same, but different from the result, + // there was an overflow. 
+ if ((pos_a == pos_b) && (pos_a != pos_r)) { + dst.SetSignedSat(i, pos_a); + } + + dst.SetInt(vform, i, ur >> (64 - lane_size)); + } + return dst; +} + + +LogicVRegister Simulator::addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uzp1(vform, temp1, src1, src2); + uzp2(vform, temp2, src1, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + mul(vform, temp, src1, src2); + sub(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mul(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mla(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + 
SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return mls(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister 
Simulator::umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return 
sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = + VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform)); + return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sdot(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::sqrdmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmlah(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister Simulator::udot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return udot(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +LogicVRegister 
Simulator::sqrdmlsh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + SimVRegister temp; + VectorFormat indexform = VectorFormatFillQ(vform); + return sqrdmlsh(vform, dst, src1, dup_element(indexform, temp, src2, index)); +} + + +uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) const { + uint16_t result = 0; + uint16_t extended_op2 = op2; + for (int i = 0; i < 8; ++i) { + if ((op1 >> i) & 1) { + result = result ^ (extended_op2 << i); + } + } + return result; +} + + +LogicVRegister Simulator::pmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, + i, + PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i))); + } + return dst; +} + + +LogicVRegister Simulator::pmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidth(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, + i, + PolynomialMult(src1.Uint(vform_src, i), + src2.Uint(vform_src, i))); + } + return dst; +} + + +LogicVRegister Simulator::pmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform); + dst.ClearForWrite(vform); + int lane_count = LaneCountFromFormat(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, + i, + PolynomialMult(src1.Uint(vform_src, lane_count + i), + src2.Uint(vform_src, lane_count + i))); + } + return dst; +} + + +LogicVRegister Simulator::sub(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + int lane_size = LaneSizeInBitsFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < 
LaneCountFromFormat(vform); i++) { + // Test for unsigned saturation. + uint64_t ua = src1.UintLeftJustified(vform, i); + uint64_t ub = src2.UintLeftJustified(vform, i); + uint64_t ur = ua - ub; + if (ub > ua) { + dst.SetUnsignedSat(i, false); + } + + // Test for signed saturation. + bool pos_a = (ua >> 63) == 0; + bool pos_b = (ub >> 63) == 0; + bool pos_r = (ur >> 63) == 0; + // If the signs of the operands are different, and the sign of the first + // operand doesn't match the result, there was an overflow. + if ((pos_a != pos_b) && (pos_a != pos_r)) { + dst.SetSignedSat(i, pos_a); + } + + dst.SetInt(vform, i, ur >> (64 - lane_size)); + } + return dst; +} + + +LogicVRegister Simulator::and_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::orn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::eor(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::bic(VectorFormat vform, + LogicVRegister dst, + 
const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) & ~imm; + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::bif(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = ~src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::bit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = dst.Uint(vform, i); + uint64_t operand2 = src2.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::bsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t operand1 = src2.Uint(vform, i); + uint64_t operand2 = dst.Uint(vform, i); + uint64_t operand3 = src1.Uint(vform, i); + uint64_t result = operand1 ^ ((operand1 ^ operand3) & 
operand2); + dst.SetUint(vform, i, result); + } + return dst; +} + + +LogicVRegister Simulator::sminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t src1_val = src1.Int(vform, i); + int64_t src2_val = src2.Int(vform, i); + int64_t dst_val; + if (max) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? src1_val : src2_val; + } + dst.SetInt(vform, i, dst_val); + } + return dst; +} + + +LogicVRegister Simulator::smax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmax(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::smin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmax(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::sminmaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + int lanes = LaneCountFromFormat(vform); + int64_t result[kMaxLanesPerVector]; + const LogicVRegister* src = &src1; + for (int j = 0; j < 2; j++) { + for (int i = 0; i < lanes; i += 2) { + int64_t first_val = src->Int(vform, i); + int64_t second_val = src->Int(vform, i + 1); + int64_t dst_val; + if (max) { + dst_val = (first_val > second_val) ? first_val : second_val; + } else { + dst_val = (first_val < second_val) ? 
first_val : second_val; + } + VIXL_ASSERT(((i >> 1) + (j * lanes / 2)) < kMaxLanesPerVector); + result[(i >> 1) + (j * lanes / 2)] = dst_val; + } + src = &src2; + } + dst.SetIntArray(vform, result); + return dst; +} + + +LogicVRegister Simulator::smaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmaxp(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::sminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sminmaxp(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VIXL_ASSERT(vform == kFormatD); + + uint64_t dst_val = src.Uint(kFormat2D, 0) + src.Uint(kFormat2D, 1); + dst.ClearForWrite(vform); + dst.SetUint(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::addv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform)); + + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::saddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + int64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst_val += src.Int(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetInt(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::uaddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_dst = + ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2); + + uint64_t dst_val = 0; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + 
dst_val += src.Uint(vform, i); + } + + dst.ClearForWrite(vform_dst); + dst.SetUint(vform_dst, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::sminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max) { + int64_t dst_val = max ? INT64_MIN : INT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t src_val = src.Int(vform, i); + if (max) { + dst_val = (src_val > dst_val) ? src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? src_val : dst_val; + } + } + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetInt(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::smaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + sminmaxv(vform, dst, src, true); + return dst; +} + + +LogicVRegister Simulator::sminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + sminmaxv(vform, dst, src, false); + return dst; +} + + +LogicVRegister Simulator::uminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t src1_val = src1.Uint(vform, i); + uint64_t src2_val = src2.Uint(vform, i); + uint64_t dst_val; + if (max) { + dst_val = (src1_val > src2_val) ? src1_val : src2_val; + } else { + dst_val = (src1_val < src2_val) ? 
src1_val : src2_val; + } + dst.SetUint(vform, i, dst_val); + } + return dst; +} + + +LogicVRegister Simulator::umax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmax(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::umin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmax(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::uminmaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max) { + int lanes = LaneCountFromFormat(vform); + uint64_t result[kMaxLanesPerVector]; + const LogicVRegister* src = &src1; + for (int j = 0; j < 2; j++) { + for (int i = 0; i < LaneCountFromFormat(vform); i += 2) { + uint64_t first_val = src->Uint(vform, i); + uint64_t second_val = src->Uint(vform, i + 1); + uint64_t dst_val; + if (max) { + dst_val = (first_val > second_val) ? first_val : second_val; + } else { + dst_val = (first_val < second_val) ? first_val : second_val; + } + VIXL_ASSERT(((i >> 1) + (j * lanes / 2)) < kMaxLanesPerVector); + result[(i >> 1) + (j * lanes / 2)] = dst_val; + } + src = &src2; + } + dst.SetUintArray(vform, result); + return dst; +} + + +LogicVRegister Simulator::umaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmaxp(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::uminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return uminmaxp(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::uminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max) { + uint64_t dst_val = max ? 0 : UINT64_MAX; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t src_val = src.Uint(vform, i); + if (max) { + dst_val = (src_val > dst_val) ? 
src_val : dst_val; + } else { + dst_val = (src_val < dst_val) ? src_val : dst_val; + } + } + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetUint(vform, 0, dst_val); + return dst; +} + + +LogicVRegister Simulator::umaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uminmaxv(vform, dst, src, true); + return dst; +} + + +LogicVRegister Simulator::uminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uminmaxv(vform, dst, src, false); + return dst; +} + + +LogicVRegister Simulator::shl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::sshll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::sshll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = sxtl2(vform, temp2, src); + return sshl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::shll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll(vform, dst, src, shift); +} + + +LogicVRegister Simulator::shll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int shift = LaneSizeInBitsFromFormat(vform) / 2; + return sshll2(vform, dst, src, shift); +} + + +LogicVRegister Simulator::ushll(VectorFormat vform, + LogicVRegister dst, + const 
LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::ushll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp1, temp2; + LogicVRegister shiftreg = dup_immediate(vform, temp1, shift); + LogicVRegister extendedreg = uxtl2(vform, temp2, src); + return ushl(vform, dst, extendedreg, shiftreg); +} + + +LogicVRegister Simulator::sli(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted = src_lane << shift; + uint64_t mask = MaxUintFromFormat(vform) << shift; + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + + +LogicVRegister Simulator::sqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, shiftreg).SignedSaturate(vform); +} + + +LogicVRegister Simulator::uqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return ushl(vform, dst, src, shiftreg).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sqshlu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, shift); + return sshl(vform, dst, src, 
shiftreg).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sri(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + dst.ClearForWrite(vform); + int laneCount = LaneCountFromFormat(vform); + VIXL_ASSERT((shift > 0) && + (shift <= static_cast(LaneSizeInBitsFromFormat(vform)))); + for (int i = 0; i < laneCount; i++) { + uint64_t src_lane = src.Uint(vform, i); + uint64_t dst_lane = dst.Uint(vform, i); + uint64_t shifted; + uint64_t mask; + if (shift == 64) { + shifted = 0; + mask = 0; + } else { + shifted = src_lane >> shift; + mask = MaxUintFromFormat(vform) >> shift; + } + dst.SetUint(vform, i, (dst_lane & ~mask) | shifted); + } + return dst; +} + + +LogicVRegister Simulator::ushr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return ushl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::sshr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + VIXL_ASSERT(shift >= 0); + SimVRegister temp; + LogicVRegister shiftreg = dup_immediate(vform, temp, -shift); + return sshl(vform, dst, src, shiftreg); +} + + +LogicVRegister Simulator::ssra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::usra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::srsra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform); + return add(vform, 
dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::ursra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform); + return add(vform, dst, dst, shifted_reg); +} + + +LogicVRegister Simulator::cls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::clz(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::cnt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; i++) { + uint64_t value = src.Uint(vform, i); + result[i] = 0; + for (int j = 0; j < laneSizeInBits; j++) { + result[i] += (value & 1); + value >>= 1; + } + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::sshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < 
LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + int64_t lj_src_val = src1.IntLeftJustified(vform, i); + + // Set signed saturation state. + if ((shift_val > CountLeadingSignBits(lj_src_val)) && (lj_src_val != 0)) { + dst.SetSignedSat(i, lj_src_val >= 0); + } + + // Set unsigned saturation state. + if (lj_src_val < 0) { + dst.SetUnsignedSat(i, false); + } else if ((shift_val > CountLeadingZeros(lj_src_val)) && + (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + int64_t src_val = src1.Int(vform, i); + bool src_is_negative = src_val < 0; + if (shift_val > 63) { + dst.SetInt(vform, i, 0); + } else if (shift_val < -63) { + dst.SetRounding(i, src_is_negative); + dst.SetInt(vform, i, src_is_negative ? -1 : 0); + } else { + // Use unsigned types for shifts, as behaviour is undefined for signed + // lhs. + uint64_t usrc_val = static_cast(src_val); + + if (shift_val < 0) { + // Convert to right shift. + shift_val = -shift_val; + + // Set rounding state by testing most-significant bit shifted out. + // Rounding only needed on right shifts. + if (((usrc_val >> (shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + + usrc_val >>= shift_val; + + if (src_is_negative) { + // Simulate sign-extension. + usrc_val |= (~UINT64_C(0) << (64 - shift_val)); + } + } else { + usrc_val <<= shift_val; + } + dst.SetUint(vform, i, usrc_val); + } + } + return dst; +} + + +LogicVRegister Simulator::ushl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int8_t shift_val = src2.Int(vform, i); + uint64_t lj_src_val = src1.UintLeftJustified(vform, i); + + // Set saturation state. 
+ if ((shift_val > CountLeadingZeros(lj_src_val)) && (lj_src_val != 0)) { + dst.SetUnsignedSat(i, true); + } + + uint64_t src_val = src1.Uint(vform, i); + if ((shift_val > 63) || (shift_val < -64)) { + dst.SetUint(vform, i, 0); + } else { + if (shift_val < 0) { + // Set rounding state. Rounding only needed on right shifts. + if (((src_val >> (-shift_val - 1)) & 1) == 1) { + dst.SetRounding(i, true); + } + + if (shift_val == -64) { + src_val = 0; + } else { + src_val >>= -shift_val; + } + } else { + src_val <<= shift_val; + } + dst.SetUint(vform, i, src_val); + } + } + return dst; +} + + +LogicVRegister Simulator::neg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + dst.SetInt(vform, i, (sa == INT64_MIN) ? sa : -sa); + } + return dst; +} + + +LogicVRegister Simulator::suqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t sa = dst.IntLeftJustified(vform, i); + uint64_t ub = src.UintLeftJustified(vform, i); + uint64_t ur = sa + ub; + + int64_t sr; + memcpy(&sr, &ur, sizeof(sr)); + if (sr < sa) { // Test for signed positive saturation. + dst.SetInt(vform, i, MaxIntFromFormat(vform)); + } else { + dst.SetUint(vform, i, dst.Int(vform, i) + src.Uint(vform, i)); + } + } + return dst; +} + + +LogicVRegister Simulator::usqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t ua = dst.UintLeftJustified(vform, i); + int64_t sb = src.IntLeftJustified(vform, i); + uint64_t ur = ua + sb; + + if ((sb > 0) && (ur <= ua)) { + dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation. 
+ } else if ((sb < 0) && (ur >= ua)) { + dst.SetUint(vform, i, 0); // Negative saturation. + } else { + dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i)); + } + } + return dst; +} + + +LogicVRegister Simulator::abs(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + // Test for signed saturation. + int64_t sa = src.Int(vform, i); + if (sa == MinIntFromFormat(vform)) { + dst.SetSignedSat(i, true); + } + if (sa < 0) { + dst.SetInt(vform, i, (sa == INT64_MIN) ? sa : -sa); + } else { + dst.SetInt(vform, i, sa); + } + } + return dst; +} + + +LogicVRegister Simulator::extractnarrow(VectorFormat dstform, + LogicVRegister dst, + bool dstIsSigned, + const LogicVRegister& src, + bool srcIsSigned) { + bool upperhalf = false; + VectorFormat srcform = kFormatUndefined; + int64_t ssrc[8]; + uint64_t usrc[8]; + + switch (dstform) { + case kFormat8B: + upperhalf = false; + srcform = kFormat8H; + break; + case kFormat16B: + upperhalf = true; + srcform = kFormat8H; + break; + case kFormat4H: + upperhalf = false; + srcform = kFormat4S; + break; + case kFormat8H: + upperhalf = true; + srcform = kFormat4S; + break; + case kFormat2S: + upperhalf = false; + srcform = kFormat2D; + break; + case kFormat4S: + upperhalf = true; + srcform = kFormat2D; + break; + case kFormatB: + upperhalf = false; + srcform = kFormatH; + break; + case kFormatH: + upperhalf = false; + srcform = kFormatS; + break; + case kFormatS: + upperhalf = false; + srcform = kFormatD; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + ssrc[i] = src.Int(srcform, i); + usrc[i] = src.Uint(srcform, i); + } + + int offset; + if (upperhalf) { + offset = LaneCountFromFormat(dstform) / 2; + } else { + offset = 0; + dst.ClearForWrite(dstform); + } + + for (int i = 0; i < LaneCountFromFormat(srcform); i++) { + // Test for signed saturation + if (ssrc[i] > 
MaxIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, true); + } else if (ssrc[i] < MinIntFromFormat(dstform)) { + dst.SetSignedSat(offset + i, false); + } + + // Test for unsigned saturation + if (srcIsSigned) { + if (ssrc[i] > static_cast(MaxUintFromFormat(dstform))) { + dst.SetUnsignedSat(offset + i, true); + } else if (ssrc[i] < 0) { + dst.SetUnsignedSat(offset + i, false); + } + } else { + if (usrc[i] > MaxUintFromFormat(dstform)) { + dst.SetUnsignedSat(offset + i, true); + } + } + + int64_t result; + if (srcIsSigned) { + result = ssrc[i] & MaxUintFromFormat(dstform); + } else { + result = usrc[i] & MaxUintFromFormat(dstform); + } + + if (dstIsSigned) { + dst.SetInt(dstform, offset + i, result); + } else { + dst.SetUint(dstform, offset + i, result); + } + } + return dst; +} + + +LogicVRegister Simulator::xtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, true, src, true); +} + + +LogicVRegister Simulator::sqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, true, src, true).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqxtun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, false, src, true).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return extractnarrow(vform, dst, false, src, false).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::absdiff(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool issigned) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (issigned) { + int64_t sr = src1.Int(vform, i) - src2.Int(vform, i); + sr = sr > 0 ? sr : -sr; + dst.SetInt(vform, i, sr); + } else { + int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i); + sr = sr > 0 ? 
sr : -sr; + dst.SetUint(vform, i, sr); + } + } + return dst; +} + + +LogicVRegister Simulator::saba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + absdiff(vform, temp, src1, src2, true); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::uaba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + dst.ClearForWrite(vform); + absdiff(vform, temp, src1, src2, false); + add(vform, dst, dst, temp); + return dst; +} + + +LogicVRegister Simulator::not_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, ~src.Uint(vform, i)); + } + return dst; +} + + +LogicVRegister Simulator::rbit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSizeInBits = LaneSizeInBitsFromFormat(vform); + uint64_t reversed_value; + uint64_t value; + for (int i = 0; i < laneCount; i++) { + value = src.Uint(vform, i); + reversed_value = 0; + for (int j = 0; j < laneSizeInBits; j++) { + reversed_value = (reversed_value << 1) | (value & 1); + value >>= 1; + } + result[i] = reversed_value; + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::rev(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int revSize) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int laneSize = LaneSizeInBytesFromFormat(vform); + int lanesPerLoop = revSize / laneSize; + for (int i = 0; i < laneCount; i += lanesPerLoop) { + for (int j = 0; j < lanesPerLoop; j++) { + result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j); + } + } + 
dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::rev16(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 2); +} + + +LogicVRegister Simulator::rev32(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 4); +} + + +LogicVRegister Simulator::rev64(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return rev(vform, dst, src, 8); +} + + +LogicVRegister Simulator::addlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool is_signed, + bool do_accumulate) { + VectorFormat vformsrc = VectorFormatHalfWidthDoubleLanes(vform); + VIXL_ASSERT(LaneSizeInBitsFromFormat(vformsrc) <= 32); + VIXL_ASSERT(LaneCountFromFormat(vform) <= 8); + + uint64_t result[8]; + int lane_count = LaneCountFromFormat(vform); + for (int i = 0; i < lane_count; i++) { + if (is_signed) { + result[i] = static_cast(src.Int(vformsrc, 2 * i) + + src.Int(vformsrc, 2 * i + 1)); + } else { + result[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1); + } + } + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; ++i) { + if (do_accumulate) { + result[i] += dst.Uint(vform, i); + } + dst.SetUint(vform, i, result[i]); + } + + return dst; +} + + +LogicVRegister Simulator::saddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, false); +} + + +LogicVRegister Simulator::uaddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, false, false); +} + + +LogicVRegister Simulator::sadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, dst, src, true, true); +} + + +LogicVRegister Simulator::uadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + return addlp(vform, 
dst, src, false, true); +} + + +LogicVRegister Simulator::ext(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + uint8_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount - index; ++i) { + result[i] = src1.Uint(vform, i + index); + } + for (int i = 0; i < index; ++i) { + result[laneCount - index + i] = src2.Uint(vform, i); + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + +template +LogicVRegister Simulator::fcadd(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + int elements = LaneCountFromFormat(vform); + + T element1, element3; + rot = (rot == 1) ? 270 : 90; + + // Loop example: + // 2S --> (2/2 = 1 - 1 = 0) --> 1 x Complex Number (2x components: r+i) + // 4S --> (4/2 = 2) - 1 = 1) --> 2 x Complex Number (2x2 components: r+i) + + for (int e = 0; e <= (elements / 2) - 1; e++) { + switch (rot) { + case 90: + element1 = FPNeg(src2.Float(e * 2 + 1)); + element3 = src2.Float(e * 2); + break; + case 270: + element1 = src2.Float(e * 2 + 1); + element3 = FPNeg(src2.Float(e * 2)); + break; + default: + VIXL_UNREACHABLE(); + return dst; // prevents "element(n) may be unintialized" errors + } + dst.ClearForWrite(vform); + dst.SetFloat(e * 2, FPAdd(src1.Float(e * 2), element1)); + dst.SetFloat(e * 2 + 1, FPAdd(src1.Float(e * 2 + 1), element3)); + } + return dst; +} + + +LogicVRegister Simulator::fcadd(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + VIXL_UNIMPLEMENTED(); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcadd(vform, dst, src1, src2, rot); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fcadd(vform, dst, src1, src2, 
rot); + } + return dst; +} + + +template +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int index, + int rot) { + int elements = LaneCountFromFormat(vform); + + T element1, element2, element3, element4; + rot *= 90; + + // Loop example: + // 2S --> (2/2 = 1 - 1 = 0) --> 1 x Complex Number (2x components: r+i) + // 4S --> (4/2 = 2) - 1 = 1) --> 2 x Complex Number (2x2 components: r+i) + + for (int e = 0; e <= (elements / 2) - 1; e++) { + switch (rot) { + case 0: + element1 = src2.Float(index * 2); + element2 = src1.Float(e * 2); + element3 = src2.Float(index * 2 + 1); + element4 = src1.Float(e * 2); + break; + case 90: + element1 = FPNeg(src2.Float(index * 2 + 1)); + element2 = src1.Float(e * 2 + 1); + element3 = src2.Float(index * 2); + element4 = src1.Float(e * 2 + 1); + break; + case 180: + element1 = FPNeg(src2.Float(index * 2)); + element2 = src1.Float(e * 2); + element3 = FPNeg(src2.Float(index * 2 + 1)); + element4 = src1.Float(e * 2); + break; + case 270: + element1 = src2.Float(index * 2 + 1); + element2 = src1.Float(e * 2 + 1); + element3 = FPNeg(src2.Float(index * 2)); + element4 = src1.Float(e * 2 + 1); + break; + default: + VIXL_UNREACHABLE(); + return dst; // prevents "element(n) may be unintialized" errors + } + dst.ClearForWrite(vform); + dst.SetFloat(e * 2, FPMulAdd(dst.Float(e * 2), element2, element1)); + dst.SetFloat(e * 2 + 1, + FPMulAdd(dst.Float(e * 2 + 1), element4, element3)); + } + return dst; +} + + +template +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + int elements = LaneCountFromFormat(vform); + + T element1, element2, element3, element4; + rot *= 90; + + // Loop example: + // 2S --> (2/2 = 1 - 1 = 0) --> 1 x Complex Number (2x components: r+i) + // 4S --> (4/2 = 2) - 1 = 1) --> 2 x Complex Number (2x2 components: 
r+i) + + for (int e = 0; e <= (elements / 2) - 1; e++) { + switch (rot) { + case 0: + element1 = src2.Float(e * 2); + element2 = src1.Float(e * 2); + element3 = src2.Float(e * 2 + 1); + element4 = src1.Float(e * 2); + break; + case 90: + element1 = FPNeg(src2.Float(e * 2 + 1)); + element2 = src1.Float(e * 2 + 1); + element3 = src2.Float(e * 2); + element4 = src1.Float(e * 2 + 1); + break; + case 180: + element1 = FPNeg(src2.Float(e * 2)); + element2 = src1.Float(e * 2); + element3 = FPNeg(src2.Float(e * 2 + 1)); + element4 = src1.Float(e * 2); + break; + case 270: + element1 = src2.Float(e * 2 + 1); + element2 = src1.Float(e * 2 + 1); + element3 = FPNeg(src2.Float(e * 2)); + element4 = src1.Float(e * 2 + 1); + break; + default: + VIXL_UNREACHABLE(); + return dst; // prevents "element(n) may be unintialized" errors + } + dst.ClearForWrite(vform); + dst.SetFloat(e * 2, FPMulAdd(dst.Float(e * 2), element2, element1)); + dst.SetFloat(e * 2 + 1, + FPMulAdd(dst.Float(e * 2 + 1), element4, element3)); + } + return dst; +} + + +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int rot) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + VIXL_UNIMPLEMENTED(); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcmla(vform, dst, src1, src2, rot); + } else { + fcmla(vform, dst, src1, src2, rot); + } + return dst; +} + + +LogicVRegister Simulator::fcmla(VectorFormat vform, + LogicVRegister dst, // d + const LogicVRegister& src1, // n + const LogicVRegister& src2, // m + int index, + int rot) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + VIXL_UNIMPLEMENTED(); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcmla(vform, dst, src1, src2, index, rot); + } else { + fcmla(vform, dst, src1, src2, index, rot); + } + return dst; +} + + +LogicVRegister Simulator::dup_element(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, 
+ int src_index) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = src.Uint(vform, src_index); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + + +LogicVRegister Simulator::dup_immediate(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + uint64_t value = imm & MaxUintFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, value); + } + return dst; +} + + +LogicVRegister Simulator::ins_element(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + int src_index) { + dst.SetUint(vform, dst_index, src.Uint(vform, src_index)); + return dst; +} + + +LogicVRegister Simulator::ins_immediate(VectorFormat vform, + LogicVRegister dst, + int dst_index, + uint64_t imm) { + uint64_t value = imm & MaxUintFromFormat(vform); + dst.SetUint(vform, dst_index, value); + return dst; +} + + +LogicVRegister Simulator::movi(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, imm); + } + return dst; +} + + +LogicVRegister Simulator::mvni(VectorFormat vform, + LogicVRegister dst, + uint64_t imm) { + int laneCount = LaneCountFromFormat(vform); + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, ~imm); + } + return dst; +} + + +LogicVRegister Simulator::orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src.Uint(vform, i) | imm; + } + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::uxtl(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetUint(vform, i, src.Uint(vform_half, i)); + } + return dst; +} + + +LogicVRegister Simulator::sxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetInt(vform, i, src.Int(vform_half, i)); + } + return dst; +} + + +LogicVRegister Simulator::uxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i)); + } + return dst; +} + + +LogicVRegister Simulator::sxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VectorFormat vform_half = VectorFormatHalfWidth(vform); + int lane_count = LaneCountFromFormat(vform); + + dst.ClearForWrite(vform); + for (int i = 0; i < lane_count; i++) { + dst.SetInt(vform, i, src.Int(vform_half, lane_count + i)); + } + return dst; +} + + +LogicVRegister Simulator::shrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vform_src = VectorFormatDoubleWidth(vform); + VectorFormat vform_dst = vform; + LogicVRegister shifted_src = ushr(vform_src, temp, src, shift); + return extractnarrow(vform_dst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::shrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = 
ushr(vformsrc, temp, src, shift); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::rshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::rshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc); + return extractnarrow(vformdst, dst, false, shifted_src, false); +} + + +LogicVRegister Simulator::Table(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& ind, + bool zero_out_of_bounds, + const LogicVRegister* tab1, + const LogicVRegister* tab2, + const LogicVRegister* tab3, + const LogicVRegister* tab4) { + VIXL_ASSERT(tab1 != NULL); + const LogicVRegister* tab[4] = {tab1, tab2, tab3, tab4}; + uint64_t result[kMaxLanesPerVector]; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + result[i] = zero_out_of_bounds ? 
0 : dst.Uint(kFormat16B, i); + } + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t j = ind.Uint(vform, i); + int tab_idx = static_cast(j >> 4); + int j_idx = static_cast(j & 15); + if ((tab_idx < 4) && (tab[tab_idx] != NULL)) { + result[i] = tab[tab_idx]->Uint(kFormat16B, j_idx); + } + } + dst.SetUintArray(vform, result); + return dst; +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2, &tab3); +} + + +LogicVRegister Simulator::tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + return Table(vform, dst, ind, true, &tab, &tab2, &tab3, &tab4); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2); +} + + +LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2, &tab3); +} + + 
+LogicVRegister Simulator::tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind) { + return Table(vform, dst, ind, false, &tab, &tab2, &tab3, &tab4); +} + + +LogicVRegister Simulator::uqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return shrn(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return shrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return rshrn(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::uqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform); +} + + +LogicVRegister Simulator::sqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat 
vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtn(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(vform); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, shifted_src); +} + + +LogicVRegister Simulator::sqrshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift) { + SimVRegister temp; + VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)); + VectorFormat vformdst = vform; + LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc); + return sqxtun(vformdst, dst, 
shifted_src); +} + + +LogicVRegister Simulator::uaddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uaddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uaddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::uaddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::saddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::saddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + add(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::saddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::saddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister 
temp; + sxtl2(vform, temp, src2); + add(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::usubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::usubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::usubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::usubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + uxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::ssubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::ssubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + sub(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::ssubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::ssubw2(VectorFormat vform, + LogicVRegister dst, + const 
LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sxtl2(vform, temp, src2); + sub(vform, dst, src1, temp); + return dst; +} + + +LogicVRegister Simulator::uabal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + uaba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uabal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + uaba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sabal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sabal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + saba(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::uabdl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, false); + return dst; +} + + +LogicVRegister Simulator::uabdl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, false); + return dst; +} + + +LogicVRegister Simulator::sabdl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister 
temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, true); + return dst; +} + + +LogicVRegister Simulator::sabdl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + absdiff(vform, dst, temp1, temp2, true); + return dst; +} + + +LogicVRegister Simulator::umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mul(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, 
src1); + uxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mls(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl(vform, temp1, src1); + uxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + uxtl2(vform, temp1, src1); + uxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl(vform, temp1, src1); + sxtl(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp1, temp2; + sxtl2(vform, temp1, src1); + sxtl2(vform, temp2, src2); + mla(vform, dst, temp1, temp2); + return dst; +} + + +LogicVRegister Simulator::sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return add(vform, 
dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return add(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = sqdmull2(vform, temp, src1, src2); + return sub(vform, dst, dst, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = smull2(vform, temp, src1, src2); + return add(vform, dst, product, product).SignedSaturate(vform); +} + + +LogicVRegister Simulator::sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round) { + // 2 * INT_32_MIN * INT_32_MIN causes int64_t to overflow. + // To avoid this, we use (src1 * src2 + 1 << (esize - 2)) >> (esize - 1) + // which is same as (2 * src1 * src2 + 1 << (esize - 1)) >> esize. + + int esize = LaneSizeInBitsFromFormat(vform); + int round_const = round ? 
(1 << (esize - 2)) : 0; + int64_t product; + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + product = src1.Int(vform, i) * src2.Int(vform, i); + product += round_const; + product = product >> (esize - 1); + + if (product > MaxIntFromFormat(vform)) { + product = MaxIntFromFormat(vform); + } else if (product < MinIntFromFormat(vform)) { + product = MinIntFromFormat(vform); + } + dst.SetInt(vform, i, product); + } + return dst; +} + + +LogicVRegister Simulator::dot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool is_signed) { + VectorFormat quarter_vform = + VectorFormatHalfWidthDoubleLanes(VectorFormatHalfWidthDoubleLanes(vform)); + + dst.ClearForWrite(vform); + for (int e = 0; e < LaneCountFromFormat(vform); e++) { + int64_t result = 0; + int64_t element1, element2; + for (int i = 0; i < 4; i++) { + int index = 4 * e + i; + if (is_signed) { + element1 = src1.Int(quarter_vform, index); + element2 = src2.Int(quarter_vform, index); + } else { + element1 = src1.Uint(quarter_vform, index); + element2 = src2.Uint(quarter_vform, index); + } + result += element1 * element2; + } + + result += dst.Int(vform, e); + dst.SetInt(vform, e, result); + } + return dst; +} + + +LogicVRegister Simulator::sdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return dot(vform, dst, src1, src2, true); +} + + +LogicVRegister Simulator::udot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return dot(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::sqrdmlash(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round, + bool sub_op) { + // 2 * INT_32_MIN * INT_32_MIN causes int64_t to overflow. 
+ // To avoid this, we use: + // (dst << (esize - 1) + src1 * src2 + 1 << (esize - 2)) >> (esize - 1) + // which is same as: + // (dst << esize + 2 * src1 * src2 + 1 << (esize - 1)) >> esize. + + int esize = LaneSizeInBitsFromFormat(vform); + int round_const = round ? (1 << (esize - 2)) : 0; + int64_t accum; + + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + accum = dst.Int(vform, i) << (esize - 1); + if (sub_op) { + accum -= src1.Int(vform, i) * src2.Int(vform, i); + } else { + accum += src1.Int(vform, i) * src2.Int(vform, i); + } + accum += round_const; + accum = accum >> (esize - 1); + + if (accum > MaxIntFromFormat(vform)) { + accum = MaxIntFromFormat(vform); + } else if (accum < MinIntFromFormat(vform)) { + accum = MinIntFromFormat(vform); + } + dst.SetInt(vform, i, accum); + } + return dst; +} + + +LogicVRegister Simulator::sqrdmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round) { + return sqrdmlash(vform, dst, src1, src2, round, false); +} + + +LogicVRegister Simulator::sqrdmlsh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round) { + return sqrdmlash(vform, dst, src1, src2, round, true); +} + + +LogicVRegister Simulator::sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + return sqrdmulh(vform, dst, src1, src2, false); +} + + +LogicVRegister Simulator::addhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::addhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + 
add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::raddhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::raddhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::subhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::subhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::rsubhn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(vform), temp, src1, src2); + rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return dst; +} + + +LogicVRegister Simulator::rsubhn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2); + rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform)); + return 
dst; +} + + +LogicVRegister Simulator::trn1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, 2 * i); + result[(2 * i) + 1] = src2.Uint(vform, 2 * i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::trn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, (2 * i) + 1); + result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::zip1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, i); + result[(2 * i) + 1] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return dst; +} + + +LogicVRegister Simulator::zip2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[16]; + int laneCount = LaneCountFromFormat(vform); + int pairs = laneCount / 2; + for (int i = 0; i < pairs; ++i) { + result[2 * i] = src1.Uint(vform, pairs + i); + result[(2 * i) + 1] = src2.Uint(vform, pairs + i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[i]); + } + return 
dst; +} + + +LogicVRegister Simulator::uzp1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[2 * i]); + } + return dst; +} + + +LogicVRegister Simulator::uzp2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + uint64_t result[32]; + int laneCount = LaneCountFromFormat(vform); + for (int i = 0; i < laneCount; ++i) { + result[i] = src1.Uint(vform, i); + result[laneCount + i] = src2.Uint(vform, i); + } + + dst.ClearForWrite(vform); + for (int i = 0; i < laneCount; ++i) { + dst.SetUint(vform, i, result[(2 * i) + 1]); + } + return dst; +} + + +template +T Simulator::FPNeg(T op) { + return -op; +} + +template +T Simulator::FPAdd(T op1, T op2) { + T result = FPProcessNaNs(op1, op2); + if (IsNaN(result)) { + return result; + } + + if (IsInf(op1) && IsInf(op2) && (op1 != op2)) { + // inf + -inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 + op2; + } +} + + +template +T Simulator::FPSub(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!IsNaN(op1) && !IsNaN(op2)); + + if (IsInf(op1) && IsInf(op2) && (op1 == op2)) { + // inf - inf returns the default NaN. + FPProcessException(); + return FPDefaultNaN(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 - op2; + } +} + + +template +T Simulator::FPMul(T op1, T op2) { + // NaNs should be handled elsewhere. + VIXL_ASSERT(!IsNaN(op1) && !IsNaN(op2)); + + if ((IsInf(op1) && (op2 == 0.0)) || (IsInf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns the default NaN. 
+ FPProcessException(); + return FPDefaultNaN(); + } else { + // Other cases should be handled by standard arithmetic. + return op1 * op2; + } +} + + +template +T Simulator::FPMulx(T op1, T op2) { + if ((IsInf(op1) && (op2 == 0.0)) || (IsInf(op2) && (op1 == 0.0))) { + // inf * 0.0 returns +/-2.0. + T two = 2.0; + return copysign(1.0, op1) * copysign(1.0, op2) * two; + } + return FPMul(op1, op2); +} + + +template +T Simulator::FPMulAdd(T a, T op1, T op2) { + T result = FPProcessNaNs3(a, op1, op2); + + T sign_a = copysign(1.0, a); + T sign_prod = copysign(1.0, op1) * copysign(1.0, op2); + bool isinf_prod = IsInf(op1) || IsInf(op2); + bool operation_generates_nan = + (IsInf(op1) && (op2 == 0.0)) || // inf * 0.0 + (IsInf(op2) && (op1 == 0.0)) || // 0.0 * inf + (IsInf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf + + if (IsNaN(result)) { + // Generated NaNs override quiet NaNs propagated from a. + if (operation_generates_nan && IsQuietNaN(a)) { + FPProcessException(); + return FPDefaultNaN(); + } else { + return result; + } + } + + // If the operation would produce a NaN, return the default NaN. + if (operation_generates_nan) { + FPProcessException(); + return FPDefaultNaN(); + } + + // Work around broken fma implementations for exact zero results: The sign of + // exact 0.0 results is positive unless both a and op1 * op2 are negative. + if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { + return ((sign_a < T(0.0)) && (sign_prod < T(0.0))) ? -0.0 : 0.0; + } + + result = FusedMultiplyAdd(op1, op2, a); + VIXL_ASSERT(!IsNaN(result)); + + // Work around broken fma implementations for rounded zero results: If a is + // 0.0, the sign of the result is the sign of op1 * op2 before rounding. + if ((a == 0.0) && (result == 0.0)) { + return copysign(0.0, sign_prod); + } + + return result; +} + + +template +T Simulator::FPDiv(T op1, T op2) { + // NaNs should be handled elsewhere. 
+ VIXL_ASSERT(!IsNaN(op1) && !IsNaN(op2)); + + if ((IsInf(op1) && IsInf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { + // inf / inf and 0.0 / 0.0 return the default NaN. + FPProcessException(); + return FPDefaultNaN(); + } else { + if (op2 == 0.0) { + FPProcessException(); + if (!IsNaN(op1)) { + double op1_sign = copysign(1.0, op1); + double op2_sign = copysign(1.0, op2); + return static_cast(op1_sign * op2_sign * kFP64PositiveInfinity); + } + } + + // Other cases should be handled by standard arithmetic. + return op1 / op2; + } +} + + +template +T Simulator::FPSqrt(T op) { + if (IsNaN(op)) { + return FPProcessNaN(op); + } else if (op < T(0.0)) { + FPProcessException(); + return FPDefaultNaN(); + } else { + return sqrt(op); + } +} + + +template +T Simulator::FPMax(T a, T b) { + T result = FPProcessNaNs(a, b); + if (IsNaN(result)) return result; + + if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return +0.0. + return 0.0; + } else { + return (a > b) ? a : b; + } +} + + +template +T Simulator::FPMaxNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64NegativeInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64NegativeInfinity; + } + + T result = FPProcessNaNs(a, b); + return IsNaN(result) ? result : FPMax(a, b); +} + + +template +T Simulator::FPMin(T a, T b) { + T result = FPProcessNaNs(a, b); + if (IsNaN(result)) return result; + + if ((a == 0.0) && (b == 0.0) && (copysign(1.0, a) != copysign(1.0, b))) { + // a and b are zero, and the sign differs: return -0.0. + return -0.0; + } else { + return (a < b) ? a : b; + } +} + + +template +T Simulator::FPMinNM(T a, T b) { + if (IsQuietNaN(a) && !IsQuietNaN(b)) { + a = kFP64PositiveInfinity; + } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { + b = kFP64PositiveInfinity; + } + + T result = FPProcessNaNs(a, b); + return IsNaN(result) ? 
result : FPMin(a, b); +} + + +template +T Simulator::FPRecipStepFused(T op1, T op2) { + const T two = 2.0; + if ((IsInf(op1) && (op2 == 0.0)) || ((op1 == 0.0) && (IsInf(op2)))) { + return two; + } else if (IsInf(op1) || IsInf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + return FusedMultiplyAdd(op1, op2, two); + } +} + +template +bool IsNormal(T value) { + return std::isnormal(value); +} + +template <> +bool IsNormal(SimFloat16 value) { + uint16_t rawbits = Float16ToRawbits(value); + uint16_t exp_mask = 0x7c00; + // Check that the exponent is neither all zeroes or all ones. + return ((rawbits & exp_mask) != 0) && ((~rawbits & exp_mask) != 0); +} + + +template +T Simulator::FPRSqrtStepFused(T op1, T op2) { + const T one_point_five = 1.5; + const T two = 2.0; + + if ((IsInf(op1) && (op2 == 0.0)) || ((op1 == 0.0) && (IsInf(op2)))) { + return one_point_five; + } else if (IsInf(op1) || IsInf(op2)) { + // Return +inf if signs match, otherwise -inf. + return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity + : kFP64NegativeInfinity; + } else { + // The multiply-add-halve operation must be fully fused, so avoid interim + // rounding by checking which operand can be losslessly divided by two + // before doing the multiply-add. + if (IsNormal(op1 / two)) { + return FusedMultiplyAdd(op1 / two, op2, one_point_five); + } else if (IsNormal(op2 / two)) { + return FusedMultiplyAdd(op1, op2 / two, one_point_five); + } else { + // Neither operand is normal after halving: the result is dominated by + // the addition term, so just return that. + return one_point_five; + } + } +} + +int32_t Simulator::FPToFixedJS(double value) { + // The Z-flag is set when the conversion from double precision floating-point + // to 32-bit integer is exact. 
If the source value is +/-Infinity, -0.0, NaN, + // outside the bounds of a 32-bit integer, or isn't an exact integer then the + // Z-flag is unset. + int Z = 1; + int32_t result; + + if ((value == 0.0) || (value == kFP64PositiveInfinity) || + (value == kFP64NegativeInfinity)) { + // +/- zero and infinity all return zero, however -0 and +/- Infinity also + // unset the Z-flag. + result = 0.0; + if ((value != 0.0) || std::signbit(value)) { + Z = 0; + } + } else if (std::isnan(value)) { + // NaN values unset the Z-flag and set the result to 0. + FPProcessNaN(value); + result = 0; + Z = 0; + } else { + // All other values are converted to an integer representation, rounded + // toward zero. + double int_result = std::floor(value); + double error = value - int_result; + + if ((error != 0.0) && (int_result < 0.0)) { + int_result++; + } + + // Constrain the value into the range [INT32_MIN, INT32_MAX]. We can almost + // write a one-liner with std::round, but the behaviour on ties is incorrect + // for our purposes. + double mod_const = static_cast(UINT64_C(1) << 32); + double mod_error = + (int_result / mod_const) - std::floor(int_result / mod_const); + double constrained; + if (mod_error == 0.5) { + constrained = INT32_MIN; + } else { + constrained = int_result - mod_const * round(int_result / mod_const); + } + + VIXL_ASSERT(std::floor(constrained) == constrained); + VIXL_ASSERT(constrained >= INT32_MIN); + VIXL_ASSERT(constrained <= INT32_MAX); + + // Take the bottom 32 bits of the result as a 32-bit integer. + result = static_cast(constrained); + + if ((int_result < INT32_MIN) || (int_result > INT32_MAX) || + (error != 0.0)) { + // If the integer result is out of range or the conversion isn't exact, + // take exception and unset the Z-flag. 
+ FPProcessException(); + Z = 0; + } + } + + ReadNzcv().SetN(0); + ReadNzcv().SetZ(Z); + ReadNzcv().SetC(0); + ReadNzcv().SetV(0); + + return result; +} + + +double Simulator::FPRoundInt(double value, FPRounding round_mode) { + if ((value == 0.0) || (value == kFP64PositiveInfinity) || + (value == kFP64NegativeInfinity)) { + return value; + } else if (IsNaN(value)) { + return FPProcessNaN(value); + } + + double int_result = std::floor(value); + double error = value - int_result; + switch (round_mode) { + case FPTieAway: { + // Take care of correctly handling the range ]-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 < value) && (value < 0.0)) { + int_result = -0.0; + + } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is positive, round up. + int_result++; + } + break; + } + case FPTieEven: { + // Take care of correctly handling the range [-0.5, -0.0], which must + // yield -0.0. + if ((-0.5 <= value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is greater than 0.5, or is equal to 0.5 and the integer + // result is odd, round up. + } else if ((error > 0.5) || + ((error == 0.5) && (std::fmod(int_result, 2) != 0))) { + int_result++; + } + break; + } + case FPZero: { + // If value>0 then we take floor(value) + // otherwise, ceil(value). + if (value < 0) { + int_result = ceil(value); + } + break; + } + case FPNegativeInfinity: { + // We always use floor(value). + break; + } + case FPPositiveInfinity: { + // Take care of correctly handling the range ]-1.0, -0.0], which must + // yield -0.0. + if ((-1.0 < value) && (value < 0.0)) { + int_result = -0.0; + + // If the error is non-zero, round up. 
+ } else if (error > 0.0) { + int_result++; + } + break; + } + default: + VIXL_UNIMPLEMENTED(); + } + return int_result; +} + + +int16_t Simulator::FPToInt16(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kHMaxInt) { + return kHMaxInt; + } else if (value < kHMinInt) { + return kHMinInt; + } + return IsNaN(value) ? 0 : static_cast(value); +} + + +int32_t Simulator::FPToInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxInt) { + return kWMaxInt; + } else if (value < kWMinInt) { + return kWMinInt; + } + return IsNaN(value) ? 0 : static_cast(value); +} + + +int64_t Simulator::FPToInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxInt) { + return kXMaxInt; + } else if (value < kXMinInt) { + return kXMinInt; + } + return IsNaN(value) ? 0 : static_cast(value); +} + + +uint16_t Simulator::FPToUInt16(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kHMaxUInt) { + return kHMaxUInt; + } else if (value < 0.0) { + return 0; + } + return IsNaN(value) ? 0 : static_cast(value); +} + + +uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kWMaxUInt) { + return kWMaxUInt; + } else if (value < 0.0) { + return 0; + } + return IsNaN(value) ? 0 : static_cast(value); +} + + +uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) { + value = FPRoundInt(value, rmode); + if (value >= kXMaxUInt) { + return kXMaxUInt; + } else if (value < 0.0) { + return 0; + } + return IsNaN(value) ? 
0 : static_cast(value); +} + + +#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ + template \ + LogicVRegister Simulator::FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + dst.ClearForWrite(vform); \ + for (int i = 0; i < LaneCountFromFormat(vform); i++) { \ + T op1 = src1.Float(i); \ + T op2 = src2.Float(i); \ + T result; \ + if (PROCNAN) { \ + result = FPProcessNaNs(op1, op2); \ + if (!IsNaN(result)) { \ + result = OP(op1, op2); \ + } \ + } else { \ + result = OP(op1, op2); \ + } \ + dst.SetFloat(i, result); \ + } \ + return dst; \ + } \ + \ + LogicVRegister Simulator::FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { \ + FN(vform, dst, src1, src2); \ + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { \ + FN(vform, dst, src1, src2); \ + } else { \ + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); \ + FN(vform, dst, src1, src2); \ + } \ + return dst; \ + } +NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP) +#undef DEFINE_NEON_FP_VECTOR_OP + + +LogicVRegister Simulator::fnmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + LogicVRegister product = fmul(vform, temp, src1, src2); + return fneg(vform, dst, product); +} + + +template +LogicVRegister Simulator::frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, IsNaN(result) ? 
result : FPRecipStepFused(op1, op2)); + } + return dst; +} + + +LogicVRegister Simulator::frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + frecps(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frecps(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frecps(vform, dst, src1, src2); + } + return dst; +} + + +template +LogicVRegister Simulator::frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T result = FPProcessNaNs(op1, op2); + dst.SetFloat(i, IsNaN(result) ? result : FPRSqrtStepFused(op1, op2)); + } + return dst; +} + + +LogicVRegister Simulator::frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + frsqrts(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frsqrts(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frsqrts(vform, dst, src1, src2); + } + return dst; +} + + +template +LogicVRegister Simulator::fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + bool result = false; + T op1 = src1.Float(i); + T op2 = src2.Float(i); + T nan_result = FPProcessNaNs(op1, op2); + if (!IsNaN(nan_result)) { + switch (cond) { + case eq: + result = (op1 == op2); + break; + case ge: + result = (op1 >= op2); + break; + case gt: + result = (op1 > op2); + break; + case le: + result = (op1 <= op2); + break; + case lt: + result 
= (op1 < op2); + break; + default: + VIXL_UNREACHABLE(); + break; + } + } + dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0); + } + return dst; +} + + +LogicVRegister Simulator::fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fcmp(vform, dst, src1, src2, cond); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fcmp(vform, dst, src1, src2, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fcmp(vform, dst, src1, src2, cond); + } + return dst; +} + + +LogicVRegister Simulator::fcmp_zero(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + Condition cond) { + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister zero_reg = + dup_immediate(vform, temp, Float16ToRawbits(SimFloat16(0.0))); + fcmp(vform, dst, src, zero_reg, cond); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister zero_reg = dup_immediate(vform, temp, FloatToRawbits(0.0)); + fcmp(vform, dst, src, zero_reg, cond); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister zero_reg = dup_immediate(vform, temp, DoubleToRawbits(0.0)); + fcmp(vform, dst, src, zero_reg, cond); + } + return dst; +} + + +LogicVRegister Simulator::fabscmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond) { + SimVRegister temp1, temp2; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } else { 
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister abs_src1 = fabs_(vform, temp1, src1); + LogicVRegister abs_src2 = fabs_(vform, temp2, src2); + fcmp(vform, dst, abs_src1, abs_src2, cond); + } + return dst; +} + + +template +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = src1.Float(i); + T op2 = src2.Float(i); + T acc = dst.Float(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fmla(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fmla(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fmla(vform, dst, src1, src2); + } + return dst; +} + + +template +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op1 = -src1.Float(i); + T op2 = src2.Float(i); + T acc = dst.Float(i); + T result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fmls(vform, dst, src1, src2); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fmls(vform, dst, src1, src2); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fmls(vform, dst, src1, src2); + } + return dst; +} + + +LogicVRegister Simulator::fmlal(VectorFormat vform, + LogicVRegister dst, + 
const LogicVRegister& src1, + const LogicVRegister& src2) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op1 = FPToFloat(src1.Float(i), kIgnoreDefaultNaN); + float op2 = FPToFloat(src2.Float(i), kIgnoreDefaultNaN); + float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int src = i + LaneCountFromFormat(vform); + float op1 = FPToFloat(src1.Float(src), kIgnoreDefaultNaN); + float op2 = FPToFloat(src2.Float(src), kIgnoreDefaultNaN); + float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op1 = -FPToFloat(src1.Float(i), kIgnoreDefaultNaN); + float op2 = FPToFloat(src2.Float(i), kIgnoreDefaultNaN); + float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int src = i + LaneCountFromFormat(vform); + float op1 = -FPToFloat(src1.Float(src), kIgnoreDefaultNaN); + float op2 = FPToFloat(src2.Float(src), kIgnoreDefaultNaN); + 
float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + dst.ClearForWrite(vform); + float op2 = FPToFloat(src2.Float(index), kIgnoreDefaultNaN); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op1 = FPToFloat(src1.Float(i), kIgnoreDefaultNaN); + float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + dst.ClearForWrite(vform); + float op2 = FPToFloat(src2.Float(index), kIgnoreDefaultNaN); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int src = i + LaneCountFromFormat(vform); + float op1 = FPToFloat(src1.Float(src), kIgnoreDefaultNaN); + float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + dst.ClearForWrite(vform); + float op2 = FPToFloat(src2.Float(index), kIgnoreDefaultNaN); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op1 = -FPToFloat(src1.Float(i), kIgnoreDefaultNaN); + float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::fmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); 
+ dst.ClearForWrite(vform); + float op2 = FPToFloat(src2.Float(index), kIgnoreDefaultNaN); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int src = i + LaneCountFromFormat(vform); + float op1 = -FPToFloat(src1.Float(src), kIgnoreDefaultNaN); + float acc = dst.Float(i); + float result = FPMulAdd(acc, op1, op2); + dst.SetFloat(i, result); + } + return dst; +} + + +template +LogicVRegister Simulator::fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + op = -op; + dst.SetFloat(i, op); + } + return dst; +} + + +LogicVRegister Simulator::fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fneg(vform, dst, src); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fneg(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fneg(vform, dst, src); + } + return dst; +} + + +template +LogicVRegister Simulator::fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + if (copysign(1.0, op) < 0.0) { + op = -op; + } + dst.SetFloat(i, op); + } + return dst; +} + + +LogicVRegister Simulator::fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + fabs_(vform, dst, src); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + fabs_(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + fabs_(vform, dst, src); + } + return dst; +} + + +LogicVRegister Simulator::fabd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2) { + SimVRegister temp; + fsub(vform, temp, src1, src2); + fabs_(vform, dst, temp); + return dst; 
+} + + +LogicVRegister Simulator::fsqrt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double result = FPSqrt(src.Float(i)); + dst.SetFloat(i, result); + } + } + return dst; +} + + +#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \ + LogicVRegister Simulator::FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2) { \ + SimVRegister temp1, temp2; \ + uzp1(vform, temp1, src1, src2); \ + uzp2(vform, temp2, src1, src2); \ + FN(vform, dst, temp1, temp2); \ + return dst; \ + } \ + \ + LogicVRegister Simulator::FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src) { \ + if (vform == kFormatH) { \ + SimFloat16 result(OP(SimFloat16(RawbitsToFloat16(src.Uint(vform, 0))), \ + SimFloat16(RawbitsToFloat16(src.Uint(vform, 1))))); \ + dst.SetUint(vform, 0, Float16ToRawbits(result)); \ + } else if (vform == kFormatS) { \ + float result = OP(src.Float(0), src.Float(1)); \ + dst.SetFloat(0, result); \ + } else { \ + VIXL_ASSERT(vform == kFormatD); \ + double result = OP(src.Float(0), src.Float(1)); \ + dst.SetFloat(0, result); \ + } \ + dst.ClearForWrite(vform); \ + return dst; \ + } +NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP) +#undef DEFINE_NEON_FP_PAIR_OP + +template +LogicVRegister Simulator::fminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + typename TFPMinMaxOp::type Op) { + VIXL_ASSERT((vform == kFormat4H) || (vform == 
kFormat8H) || + (vform == kFormat4S)); + USE(vform); + T result1 = (this->*Op)(src.Float(0), src.Float(1)); + T result2 = (this->*Op)(src.Float(2), src.Float(3)); + if (vform == kFormat8H) { + T result3 = (this->*Op)(src.Float(4), src.Float(5)); + T result4 = (this->*Op)(src.Float(6), src.Float(7)); + result1 = (this->*Op)(result1, result3); + result2 = (this->*Op)(result2, result4); + } + T result = (this->*Op)(result1, result2); + dst.ClearForWrite(ScalarFormatFromFormat(vform)); + dst.SetFloat(0, result); + return dst; +} + + +LogicVRegister Simulator::fmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, dst, src, &Simulator::FPMax); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMax); + } +} + + +LogicVRegister Simulator::fminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, dst, src, &Simulator::FPMin); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMin); + } +} + + +LogicVRegister Simulator::fmaxnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, + dst, + src, + &Simulator::FPMaxNM); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMaxNM); + } +} + + +LogicVRegister Simulator::fminnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + return fminmaxv(vform, + dst, + src, + &Simulator::FPMinNM); + } else { + return fminmaxv(vform, dst, src, &Simulator::FPMinNM); + } +} + + +LogicVRegister Simulator::fmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister 
index_reg = dup_element(kFormat8H, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmul(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister index_reg = dup_element(kFormat8H, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmla(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister index_reg = dup_element(kFormat8H, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmls(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::fmulx(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index) { + dst.ClearForWrite(vform); + SimVRegister temp; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + LogicVRegister index_reg = dup_element(kFormat8H, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index); + fmulx(vform, dst, src1, index_reg); + } + return dst; +} + + +LogicVRegister Simulator::frint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + bool inexact_exception) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 input = src.Float(i); + SimFloat16 rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !IsNaN(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + float rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !IsNaN(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + double rounded = FPRoundInt(input, rounding_mode); + if (inexact_exception && !IsNaN(input) && (input != rounded)) { + FPProcessException(); + } + dst.SetFloat(i, rounded); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + 
FPRounding rounding_mode, + int fbits) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 op = + static_cast(src.Float(i)) * std::pow(2.0, fbits); + dst.SetInt(vform, i, FPToInt16(op, rounding_mode)); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op = src.Float(i) * std::pow(2.0f, fbits); + dst.SetInt(vform, i, FPToInt32(op, rounding_mode)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double op = src.Float(i) * std::pow(2.0, fbits); + dst.SetInt(vform, i, FPToInt64(op, rounding_mode)); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 op = + static_cast(src.Float(i)) * std::pow(2.0, fbits); + dst.SetUint(vform, i, FPToUInt16(op, rounding_mode)); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float op = src.Float(i) * std::pow(2.0f, fbits); + dst.SetUint(vform, i, FPToUInt32(op, rounding_mode)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double op = src.Float(i) * std::pow(2.0, fbits); + dst.SetUint(vform, i, FPToUInt64(op, rounding_mode)); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) { + // TODO: Full support for SimFloat16 in SimRegister(s). 
+ dst.SetFloat(i, + FPToFloat(RawbitsToFloat16(src.Float(i)), + ReadDN())); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) { + dst.SetFloat(i, FPToDouble(src.Float(i), ReadDN())); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int lane_count = LaneCountFromFormat(vform); + if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < lane_count; i++) { + // TODO: Full support for SimFloat16 in SimRegister(s). + dst.SetFloat(i, + FPToFloat(RawbitsToFloat16( + src.Float(i + lane_count)), + ReadDN())); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < lane_count; i++) { + dst.SetFloat(i, FPToDouble(src.Float(i + lane_count), ReadDN())); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, + Float16ToRawbits( + FPToFloat16(src.Float(i), FPTieEven, ReadDN()))); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat(src.Float(i), FPTieEven, ReadDN())); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + int lane_count = LaneCountFromFormat(vform) / 2; + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, + Float16ToRawbits( + FPToFloat16(src.Float(i), FPTieEven, ReadDN()))); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, + FPToFloat(src.Float(i), FPTieEven, 
ReadDN())); + } + } + return dst; +} + + +LogicVRegister Simulator::fcvtxn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + dst.SetFloat(i, FPToFloat(src.Float(i), FPRoundOdd, ReadDN())); + } + return dst; +} + + +LogicVRegister Simulator::fcvtxn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize); + int lane_count = LaneCountFromFormat(vform) / 2; + for (int i = lane_count - 1; i >= 0; i--) { + dst.SetFloat(i + lane_count, + FPToFloat(src.Float(i), FPRoundOdd, ReadDN())); + } + return dst; +} + + +// Based on reference C function recip_sqrt_estimate from ARM ARM. +double Simulator::recip_sqrt_estimate(double a) { + int q0, q1, s; + double r; + if (a < 0.5) { + q0 = static_cast(a * 512.0); + r = 1.0 / sqrt((static_cast(q0) + 0.5) / 512.0); + } else { + q1 = static_cast(a * 256.0); + r = 1.0 / sqrt((static_cast(q1) + 0.5) / 256.0); + } + s = static_cast(256.0 * r + 0.5); + return static_cast(s) / 256.0; +} + + +static inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) { + return ExtractUnsignedBitfield64(start_bit, end_bit, val); +} + + +template +T Simulator::FPRecipSqrtEstimate(T op) { + if (IsNaN(op)) { + return FPProcessNaN(op); + } else if (op == 0.0) { + if (copysign(1.0, op) < 0.0) { + return kFP64NegativeInfinity; + } else { + return kFP64PositiveInfinity; + } + } else if (copysign(1.0, op) < 0.0) { + FPProcessException(); + return FPDefaultNaN(); + } else if (IsInf(op)) { + return 0.0; + } else { + uint64_t fraction; + int exp, result_exp; + + if (IsFloat16()) { + exp = Float16Exp(op); + fraction = Float16Mantissa(op); + fraction <<= 42; + } else if (IsFloat32()) { + exp = FloatExp(op); + fraction = FloatMantissa(op); + fraction <<= 29; + } else { + VIXL_ASSERT(IsFloat64()); + exp = 
DoubleExp(op); + fraction = DoubleMantissa(op); + } + + if (exp == 0) { + while (Bits(fraction, 51, 51) == 0) { + fraction = Bits(fraction, 50, 0) << 1; + exp -= 1; + } + fraction = Bits(fraction, 50, 0) << 1; + } + + double scaled; + if (Bits(exp, 0, 0) == 0) { + scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44); + } else { + scaled = DoublePack(0, 1021, Bits(fraction, 51, 44) << 44); + } + + if (IsFloat16()) { + result_exp = (44 - exp) / 2; + } else if (IsFloat32()) { + result_exp = (380 - exp) / 2; + } else { + VIXL_ASSERT(IsFloat64()); + result_exp = (3068 - exp) / 2; + } + + uint64_t estimate = DoubleToRawbits(recip_sqrt_estimate(scaled)); + + if (IsFloat16()) { + uint16_t exp_bits = static_cast(Bits(result_exp, 4, 0)); + uint16_t est_bits = static_cast(Bits(estimate, 51, 42)); + return Float16Pack(0, exp_bits, est_bits); + } else if (IsFloat32()) { + uint32_t exp_bits = static_cast(Bits(result_exp, 7, 0)); + uint32_t est_bits = static_cast(Bits(estimate, 51, 29)); + return FloatPack(0, exp_bits, est_bits); + } else { + VIXL_ASSERT(IsFloat64()); + return DoublePack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0)); + } + } +} + + +LogicVRegister Simulator::frsqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 input = src.Float(i); + dst.SetFloat(i, FPRecipSqrtEstimate(input)); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + dst.SetFloat(i, FPRecipSqrtEstimate(input)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + dst.SetFloat(i, FPRecipSqrtEstimate(input)); + } + } + return dst; +} + +template +T Simulator::FPRecipEstimate(T op, FPRounding rounding) { + 
uint32_t sign; + + if (IsFloat16()) { + sign = Float16Sign(op); + } else if (IsFloat32()) { + sign = FloatSign(op); + } else { + VIXL_ASSERT(IsFloat64()); + sign = DoubleSign(op); + } + + if (IsNaN(op)) { + return FPProcessNaN(op); + } else if (IsInf(op)) { + return (sign == 1) ? -0.0 : 0.0; + } else if (op == 0.0) { + FPProcessException(); // FPExc_DivideByZero exception. + return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity; + } else if ((IsFloat16() && (std::fabs(op) < std::pow(2.0, -16.0))) || + (IsFloat32() && (std::fabs(op) < std::pow(2.0, -128.0))) || + (IsFloat64() && (std::fabs(op) < std::pow(2.0, -1024.0)))) { + bool overflow_to_inf = false; + switch (rounding) { + case FPTieEven: + overflow_to_inf = true; + break; + case FPPositiveInfinity: + overflow_to_inf = (sign == 0); + break; + case FPNegativeInfinity: + overflow_to_inf = (sign == 1); + break; + case FPZero: + overflow_to_inf = false; + break; + default: + break; + } + FPProcessException(); // FPExc_Overflow and FPExc_Inexact. + if (overflow_to_inf) { + return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity; + } else { + // Return FPMaxNormal(sign). 
+ if (IsFloat16()) { + return Float16Pack(sign, 0x1f, 0x3ff); + } else if (IsFloat32()) { + return FloatPack(sign, 0xfe, 0x07fffff); + } else { + VIXL_ASSERT(IsFloat64()); + return DoublePack(sign, 0x7fe, 0x0fffffffffffffl); + } + } + } else { + uint64_t fraction; + int exp, result_exp; + uint32_t sign; + + if (IsFloat16()) { + sign = Float16Sign(op); + exp = Float16Exp(op); + fraction = Float16Mantissa(op); + fraction <<= 42; + } else if (IsFloat32()) { + sign = FloatSign(op); + exp = FloatExp(op); + fraction = FloatMantissa(op); + fraction <<= 29; + } else { + VIXL_ASSERT(IsFloat64()); + sign = DoubleSign(op); + exp = DoubleExp(op); + fraction = DoubleMantissa(op); + } + + if (exp == 0) { + if (Bits(fraction, 51, 51) == 0) { + exp -= 1; + fraction = Bits(fraction, 49, 0) << 2; + } else { + fraction = Bits(fraction, 50, 0) << 1; + } + } + + double scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44); + + if (IsFloat16()) { + result_exp = (29 - exp); // In range 29-30 = -1 to 29+1 = 30. + } else if (IsFloat32()) { + result_exp = (253 - exp); // In range 253-254 = -1 to 253+1 = 254. + } else { + VIXL_ASSERT(IsFloat64()); + result_exp = (2045 - exp); // In range 2045-2046 = -1 to 2045+1 = 2046. 
+ } + + double estimate = recip_estimate(scaled); + + fraction = DoubleMantissa(estimate); + if (result_exp == 0) { + fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1); + } else if (result_exp == -1) { + fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2); + result_exp = 0; + } + if (IsFloat16()) { + uint16_t exp_bits = static_cast(Bits(result_exp, 4, 0)); + uint16_t frac_bits = static_cast(Bits(fraction, 51, 42)); + return Float16Pack(sign, exp_bits, frac_bits); + } else if (IsFloat32()) { + uint32_t exp_bits = static_cast(Bits(result_exp, 7, 0)); + uint32_t frac_bits = static_cast(Bits(fraction, 51, 29)); + return FloatPack(sign, exp_bits, frac_bits); + } else { + VIXL_ASSERT(IsFloat64()); + return DoublePack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0)); + } + } +} + + +LogicVRegister Simulator::frecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding round) { + dst.ClearForWrite(vform); + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SimFloat16 input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + float input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + double input = src.Float(i); + dst.SetFloat(i, FPRecipEstimate(input, round)); + } + } + return dst; +} + + +LogicVRegister Simulator::ursqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x3FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, 
-32); + dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast(dp_result); + } + dst.SetUint(vform, i, result); + } + return dst; +} + + +// Based on reference C function recip_estimate from ARM ARM. +double Simulator::recip_estimate(double a) { + int q, s; + double r; + q = static_cast(a * 512.0); + r = 1.0 / ((static_cast(q) + 0.5) / 512.0); + s = static_cast(256.0 * r + 0.5); + return static_cast(s) / 256.0; +} + + +LogicVRegister Simulator::urecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + uint64_t operand; + uint32_t result; + double dp_operand, dp_result; + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + operand = src.Uint(vform, i); + if (operand <= 0x7FFFFFFF) { + result = 0xFFFFFFFF; + } else { + dp_operand = operand * std::pow(2.0, -32); + dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31); + result = static_cast(dp_result); + } + dst.SetUint(vform, i, result); + } + return dst; +} + +template +LogicVRegister Simulator::frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + dst.ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + T op = src.Float(i); + T result; + if (IsNaN(op)) { + result = FPProcessNaN(op); + } else { + int exp; + uint32_t sign; + if (IsFloat16()) { + sign = Float16Sign(op); + exp = Float16Exp(op); + exp = (exp == 0) ? (0x1F - 1) : static_cast(Bits(~exp, 4, 0)); + result = Float16Pack(sign, exp, 0); + } else if (IsFloat32()) { + sign = FloatSign(op); + exp = FloatExp(op); + exp = (exp == 0) ? (0xFF - 1) : static_cast(Bits(~exp, 7, 0)); + result = FloatPack(sign, exp, 0); + } else { + VIXL_ASSERT(IsFloat64()); + sign = DoubleSign(op); + exp = DoubleExp(op); + exp = (exp == 0) ? 
(0x7FF - 1) : static_cast(Bits(~exp, 10, 0)); + result = DoublePack(sign, exp, 0); + } + } + dst.SetFloat(i, result); + } + return dst; +} + + +LogicVRegister Simulator::frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + frecpx(vform, dst, src); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + frecpx(vform, dst, src); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + frecpx(vform, dst, src); + } + return dst; +} + +LogicVRegister Simulator::scvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + SimFloat16 result = FixedToFloat16(src.Int(kFormatH, i), fbits, round); + dst.SetFloat(i, result); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + float result = FixedToFloat(src.Int(kFormatS, i), fbits, round); + dst.SetFloat(i, result); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + double result = FixedToDouble(src.Int(kFormatD, i), fbits, round); + dst.SetFloat(i, result); + } + } + return dst; +} + + +LogicVRegister Simulator::ucvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding round) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + if (LaneSizeInBitsFromFormat(vform) == kHRegSize) { + SimFloat16 result = UFixedToFloat16(src.Uint(kFormatH, i), fbits, round); + dst.SetFloat(i, result); + } else if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { + float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round); + dst.SetFloat(i, result); + } else { + VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); + double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round); + dst.SetFloat(i, result); + } + } + return dst; +} + + +} // namespace aarch64 +} // namespace vixl + 
+#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.cc new file mode 100644 index 00000000..f57efd6b --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.cc @@ -0,0 +1,3059 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include + +#include "macro-assembler-aarch64.h" + +namespace vixl { +namespace aarch64 { + + +void Pool::Release() { + if (--monitor_ == 0) { + // Ensure the pool has not been blocked for too long. + VIXL_ASSERT(masm_->GetCursorOffset() < checkpoint_); + } +} + + +void Pool::SetNextCheckpoint(ptrdiff_t checkpoint) { + masm_->checkpoint_ = std::min(masm_->checkpoint_, checkpoint); + checkpoint_ = checkpoint; +} + + +LiteralPool::LiteralPool(MacroAssembler* masm) + : Pool(masm), + size_(0), + first_use_(-1), + recommended_checkpoint_(kNoCheckpointRequired) {} + + +LiteralPool::~LiteralPool() { + VIXL_ASSERT(IsEmpty()); + VIXL_ASSERT(!IsBlocked()); + for (std::vector::iterator it = deleted_on_destruction_.begin(); + it != deleted_on_destruction_.end(); + it++) { + delete *it; + } +} + + +void LiteralPool::Reset() { + std::vector::iterator it, end; + for (it = entries_.begin(), end = entries_.end(); it != end; ++it) { + RawLiteral* literal = *it; + if (literal->deletion_policy_ == RawLiteral::kDeletedOnPlacementByPool) { + delete literal; + } + } + entries_.clear(); + size_ = 0; + first_use_ = -1; + Pool::Reset(); + recommended_checkpoint_ = kNoCheckpointRequired; +} + + +void LiteralPool::CheckEmitFor(size_t amount, EmitOption option) { + if (IsEmpty() || IsBlocked()) return; + + ptrdiff_t distance = masm_->GetCursorOffset() + amount - first_use_; + if (distance >= kRecommendedLiteralPoolRange) { + Emit(option); + } +} + + +void LiteralPool::CheckEmitForBranch(size_t range) { + if (IsEmpty() || IsBlocked()) return; + if (GetMaxSize() >= range) Emit(); +} + +// We use a subclass to access the protected `ExactAssemblyScope` constructor +// giving us control over the pools. This allows us to use this scope within +// code emitting pools without creating a circular dependency. +// We keep the constructor private to restrict usage of this helper class. 
+class ExactAssemblyScopeWithoutPoolsCheck : public ExactAssemblyScope { + private: + ExactAssemblyScopeWithoutPoolsCheck(MacroAssembler* masm, size_t size) + : ExactAssemblyScope(masm, + size, + ExactAssemblyScope::kExactSize, + ExactAssemblyScope::kIgnorePools) {} + + friend void LiteralPool::Emit(LiteralPool::EmitOption); + friend void VeneerPool::Emit(VeneerPool::EmitOption, size_t); +}; + + +void LiteralPool::Emit(EmitOption option) { + // There is an issue if we are asked to emit a blocked or empty pool. + VIXL_ASSERT(!IsBlocked()); + VIXL_ASSERT(!IsEmpty()); + + size_t pool_size = GetSize(); + size_t emit_size = pool_size; + if (option == kBranchRequired) emit_size += kInstructionSize; + Label end_of_pool; + + VIXL_ASSERT(emit_size % kInstructionSize == 0); + { + CodeBufferCheckScope guard(masm_, + emit_size, + CodeBufferCheckScope::kCheck, + CodeBufferCheckScope::kExactSize); +#ifdef VIXL_DEBUG + // Also explicitly disallow usage of the `MacroAssembler` here. + masm_->SetAllowMacroInstructions(false); +#endif + if (option == kBranchRequired) { + ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize); + masm_->b(&end_of_pool); + } + + { + // Marker indicating the size of the literal pool in 32-bit words. + VIXL_ASSERT((pool_size % kWRegSizeInBytes) == 0); + ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize); + masm_->ldr(xzr, static_cast(pool_size / kWRegSizeInBytes)); + } + + // Now populate the literal pool. + std::vector::iterator it, end; + for (it = entries_.begin(), end = entries_.end(); it != end; ++it) { + VIXL_ASSERT((*it)->IsUsed()); + masm_->place(*it); + } + + if (option == kBranchRequired) masm_->bind(&end_of_pool); +#ifdef VIXL_DEBUG + masm_->SetAllowMacroInstructions(true); +#endif + } + + Reset(); +} + + +void LiteralPool::AddEntry(RawLiteral* literal) { + // A literal must be registered immediately before its first use. 
Here we + // cannot control that it is its first use, but we check no code has been + // emitted since its last use. + VIXL_ASSERT(masm_->GetCursorOffset() == literal->GetLastUse()); + + UpdateFirstUse(masm_->GetCursorOffset()); + VIXL_ASSERT(masm_->GetCursorOffset() >= first_use_); + entries_.push_back(literal); + size_ += literal->GetSize(); +} + + +void LiteralPool::UpdateFirstUse(ptrdiff_t use_position) { + first_use_ = std::min(first_use_, use_position); + if (first_use_ == -1) { + first_use_ = use_position; + SetNextRecommendedCheckpoint(GetNextRecommendedCheckpoint()); + SetNextCheckpoint(first_use_ + Instruction::kLoadLiteralRange); + } else { + VIXL_ASSERT(use_position > first_use_); + } +} + + +void VeneerPool::Reset() { + Pool::Reset(); + unresolved_branches_.Reset(); +} + + +void VeneerPool::Release() { + if (--monitor_ == 0) { + VIXL_ASSERT(IsEmpty() || + masm_->GetCursorOffset() < + unresolved_branches_.GetFirstLimit()); + } +} + + +void VeneerPool::RegisterUnresolvedBranch(ptrdiff_t branch_pos, + Label* label, + ImmBranchType branch_type) { + VIXL_ASSERT(!label->IsBound()); + BranchInfo branch_info = BranchInfo(branch_pos, label, branch_type); + unresolved_branches_.insert(branch_info); + UpdateNextCheckPoint(); + // TODO: In debug mode register the label with the assembler to make sure it + // is bound with masm Bind and not asm bind. +} + + +void VeneerPool::DeleteUnresolvedBranchInfoForLabel(Label* label) { + if (IsEmpty()) { + VIXL_ASSERT(checkpoint_ == kNoCheckpointRequired); + return; + } + + if (label->IsLinked()) { + Label::LabelLinksIterator links_it(label); + for (; !links_it.Done(); links_it.Advance()) { + ptrdiff_t link_offset = *links_it.Current(); + Instruction* link = masm_->GetInstructionAt(link_offset); + + // ADR instructions are not handled. 
+ if (BranchTypeUsesVeneers(link->GetBranchType())) { + BranchInfo branch_info(link_offset, label, link->GetBranchType()); + unresolved_branches_.erase(branch_info); + } + } + } + + UpdateNextCheckPoint(); +} + + +bool VeneerPool::ShouldEmitVeneer(int64_t first_unreacheable_pc, + size_t amount) { + ptrdiff_t offset = + kPoolNonVeneerCodeSize + amount + GetMaxSize() + GetOtherPoolsMaxSize(); + return (masm_->GetCursorOffset() + offset) > first_unreacheable_pc; +} + + +void VeneerPool::CheckEmitFor(size_t amount, EmitOption option) { + if (IsEmpty()) return; + + VIXL_ASSERT(masm_->GetCursorOffset() + kPoolNonVeneerCodeSize < + unresolved_branches_.GetFirstLimit()); + + if (IsBlocked()) return; + + if (ShouldEmitVeneers(amount)) { + Emit(option, amount); + } else { + UpdateNextCheckPoint(); + } +} + + +void VeneerPool::Emit(EmitOption option, size_t amount) { + // There is an issue if we are asked to emit a blocked or empty pool. + VIXL_ASSERT(!IsBlocked()); + VIXL_ASSERT(!IsEmpty()); + + Label end; + if (option == kBranchRequired) { + ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize); + masm_->b(&end); + } + + // We want to avoid generating veneer pools too often, so generate veneers for + // branches that don't immediately require a veneer but will soon go out of + // range. + static const size_t kVeneerEmissionMargin = 1 * KBytes; + + for (BranchInfoSetIterator it(&unresolved_branches_); !it.Done();) { + BranchInfo* branch_info = it.Current(); + if (ShouldEmitVeneer(branch_info->first_unreacheable_pc_, + amount + kVeneerEmissionMargin)) { + CodeBufferCheckScope scope(masm_, + kVeneerCodeSize, + CodeBufferCheckScope::kCheck, + CodeBufferCheckScope::kExactSize); + ptrdiff_t branch_pos = branch_info->pc_offset_; + Instruction* branch = masm_->GetInstructionAt(branch_pos); + Label* label = branch_info->label_; + + // Patch the branch to point to the current position, and emit a branch + // to the label. 
+ Instruction* veneer = masm_->GetCursorAddress(); + branch->SetImmPCOffsetTarget(veneer); + { + ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize); + masm_->b(label); + } + + // Update the label. The branch patched does not point to it any longer. + label->DeleteLink(branch_pos); + + it.DeleteCurrentAndAdvance(); + } else { + it.AdvanceToNextType(); + } + } + + UpdateNextCheckPoint(); + + masm_->bind(&end); +} + + +MacroAssembler::MacroAssembler(PositionIndependentCodeOption pic) + : Assembler(pic), +#ifdef VIXL_DEBUG + allow_macro_instructions_(true), +#endif + generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE), + sp_(sp), + tmp_list_(ip0, ip1), + fptmp_list_(d31), + current_scratch_scope_(NULL), + literal_pool_(this), + veneer_pool_(this), + recommended_checkpoint_(Pool::kNoCheckpointRequired) { + checkpoint_ = GetNextCheckPoint(); +#ifndef VIXL_DEBUG + USE(allow_macro_instructions_); +#endif +} + + +MacroAssembler::MacroAssembler(size_t capacity, + PositionIndependentCodeOption pic) + : Assembler(capacity, pic), +#ifdef VIXL_DEBUG + allow_macro_instructions_(true), +#endif + generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE), + sp_(sp), + tmp_list_(ip0, ip1), + fptmp_list_(d31), + current_scratch_scope_(NULL), + literal_pool_(this), + veneer_pool_(this), + recommended_checkpoint_(Pool::kNoCheckpointRequired) { + checkpoint_ = GetNextCheckPoint(); +} + + +MacroAssembler::MacroAssembler(byte* buffer, + size_t capacity, + PositionIndependentCodeOption pic) + : Assembler(buffer, capacity, pic), +#ifdef VIXL_DEBUG + allow_macro_instructions_(true), +#endif + generate_simulator_code_(VIXL_AARCH64_GENERATE_SIMULATOR_CODE), + sp_(sp), + tmp_list_(ip0, ip1), + fptmp_list_(d31), + current_scratch_scope_(NULL), + literal_pool_(this), + veneer_pool_(this), + recommended_checkpoint_(Pool::kNoCheckpointRequired) { + checkpoint_ = GetNextCheckPoint(); +} + + +MacroAssembler::~MacroAssembler() {} + + +void MacroAssembler::Reset() { + 
Assembler::Reset(); + + VIXL_ASSERT(!literal_pool_.IsBlocked()); + literal_pool_.Reset(); + veneer_pool_.Reset(); + + checkpoint_ = GetNextCheckPoint(); +} + + +void MacroAssembler::FinalizeCode(FinalizeOption option) { + if (!literal_pool_.IsEmpty()) { + // The user may decide to emit more code after Finalize, emit a branch if + // that's the case. + literal_pool_.Emit(option == kUnreachable ? Pool::kNoBranchRequired + : Pool::kBranchRequired); + } + VIXL_ASSERT(veneer_pool_.IsEmpty()); + + Assembler::FinalizeCode(); +} + + +void MacroAssembler::CheckEmitFor(size_t amount) { + CheckEmitPoolsFor(amount); + GetBuffer()->EnsureSpaceFor(amount); +} + + +void MacroAssembler::CheckEmitPoolsFor(size_t amount) { + literal_pool_.CheckEmitFor(amount); + veneer_pool_.CheckEmitFor(amount); + checkpoint_ = GetNextCheckPoint(); +} + + +int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm, + const Register& rd, + uint64_t imm) { + bool emit_code = (masm != NULL); + VIXL_ASSERT(IsUint32(imm) || IsInt32(imm) || rd.Is64Bits()); + // The worst case for size is mov 64-bit immediate to sp: + // * up to 4 instructions to materialise the constant + // * 1 instruction to move to sp + MacroEmissionCheckScope guard(masm); + + // Immediates on Aarch64 can be produced using an initial value, and zero to + // three move keep operations. + // + // Initial values can be generated with: + // 1. 64-bit move zero (movz). + // 2. 32-bit move inverted (movn). + // 3. 64-bit move inverted. + // 4. 32-bit orr immediate. + // 5. 64-bit orr immediate. + // Move-keep may then be used to modify each of the 16-bit half words. + // + // The code below supports all five initial value generators, and + // applying move-keep operations to move-zero and move-inverted initial + // values. + + // Try to move the immediate in one instruction, and if that fails, switch to + // using multiple instructions. 
+ if (OneInstrMoveImmediateHelper(masm, rd, imm)) { + return 1; + } else { + int instruction_count = 0; + unsigned reg_size = rd.GetSizeInBits(); + + // Generic immediate case. Imm will be represented by + // [imm3, imm2, imm1, imm0], where each imm is 16 bits. + // A move-zero or move-inverted is generated for the first non-zero or + // non-0xffff immX, and a move-keep for subsequent non-zero immX. + + uint64_t ignored_halfword = 0; + bool invert_move = false; + // If the number of 0xffff halfwords is greater than the number of 0x0000 + // halfwords, it's more efficient to use move-inverted. + if (CountClearHalfWords(~imm, reg_size) > + CountClearHalfWords(imm, reg_size)) { + ignored_halfword = 0xffff; + invert_move = true; + } + + // Mov instructions can't move values into the stack pointer, so set up a + // temporary register, if needed. + UseScratchRegisterScope temps; + Register temp; + if (emit_code) { + temps.Open(masm); + temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd; + } + + // Iterate through the halfwords. Use movn/movz for the first non-ignored + // halfword, and movk for subsequent halfwords. + VIXL_ASSERT((reg_size % 16) == 0); + bool first_mov_done = false; + for (unsigned i = 0; i < (reg_size / 16); i++) { + uint64_t imm16 = (imm >> (16 * i)) & 0xffff; + if (imm16 != ignored_halfword) { + if (!first_mov_done) { + if (invert_move) { + if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i); + instruction_count++; + } else { + if (emit_code) masm->movz(temp, imm16, 16 * i); + instruction_count++; + } + first_mov_done = true; + } else { + // Construct a wider constant. + if (emit_code) masm->movk(temp, imm16, 16 * i); + instruction_count++; + } + } + } + + VIXL_ASSERT(first_mov_done); + + // Move the temporary if the original destination register was the stack + // pointer. 
+ if (rd.IsSP()) { + if (emit_code) masm->mov(rd, temp); + instruction_count++; + } + return instruction_count; + } +} + + +bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm, + const Register& dst, + int64_t imm) { + bool emit_code = masm != NULL; + unsigned n, imm_s, imm_r; + int reg_size = dst.GetSizeInBits(); + + if (IsImmMovz(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move zero instruction. Movz can't write + // to the stack pointer. + if (emit_code) { + masm->movz(dst, imm); + } + return true; + } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) { + // Immediate can be represented in a move negative instruction. Movn can't + // write to the stack pointer. + if (emit_code) { + masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask)); + } + return true; + } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be represented in a logical orr instruction. + VIXL_ASSERT(!dst.IsZero()); + if (emit_code) { + masm->LogicalImmediate(dst, + AppropriateZeroRegFor(dst), + n, + imm_s, + imm_r, + ORR); + } + return true; + } + return false; +} + + +void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { + VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) && + ((bit == -1) || (type >= kBranchTypeFirstUsingBit))); + if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { + B(static_cast(type), label); + } else { + switch (type) { + case always: + B(label); + break; + case never: + break; + case reg_zero: + Cbz(reg, label); + break; + case reg_not_zero: + Cbnz(reg, label); + break; + case reg_bit_clear: + Tbz(reg, bit, label); + break; + case reg_bit_set: + Tbnz(reg, bit, label); + break; + default: + VIXL_UNREACHABLE(); + } + } +} + + +void MacroAssembler::B(Label* label) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than 
the range of this branch. + VIXL_ASSERT(Instruction::GetImmBranchForwardRange(UncondBranchType) > + Instruction::kLoadLiteralRange); + SingleEmissionCheckScope guard(this); + b(label); +} + + +void MacroAssembler::B(Label* label, Condition cond) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than the range of this branch. + VIXL_ASSERT(Instruction::GetImmBranchForwardRange(CondBranchType) > + Instruction::kLoadLiteralRange); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, CondBranchType)) { + Label done; + b(&done, InvertCondition(cond)); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + CondBranchType); + } + b(label, cond); + } +} + + +void MacroAssembler::Cbnz(const Register& rt, Label* label) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than the range of this branch. 
+ VIXL_ASSERT(Instruction::GetImmBranchForwardRange(CompareBranchType) > + Instruction::kLoadLiteralRange); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, CondBranchType)) { + Label done; + cbz(rt, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + CompareBranchType); + } + cbnz(rt, label); + } +} + + +void MacroAssembler::Cbz(const Register& rt, Label* label) { + // We don't need to check the size of the literal pool, because the size of + // the literal pool is already bounded by the literal range, which is smaller + // than the range of this branch. + VIXL_ASSERT(Instruction::GetImmBranchForwardRange(CompareBranchType) > + Instruction::kLoadLiteralRange); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, CondBranchType)) { + Label done; + cbnz(rt, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + CompareBranchType); + } + cbz(rt, label); + } +} + + +void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { + // This is to avoid a situation where emitting a veneer for a TBZ/TBNZ branch + // can become impossible because we emit the literal pool first. 
+ literal_pool_.CheckEmitForBranch( + Instruction::GetImmBranchForwardRange(TestBranchType)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, TestBranchType)) { + Label done; + tbz(rt, bit_pos, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + TestBranchType); + } + tbnz(rt, bit_pos, label); + } +} + + +void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { + // This is to avoid a situation where emitting a veneer for a TBZ/TBNZ branch + // can become impossible because we emit the literal pool first. + literal_pool_.CheckEmitForBranch( + Instruction::GetImmBranchForwardRange(TestBranchType)); + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + EmissionCheckScope guard(this, 2 * kInstructionSize); + + if (label->IsBound() && LabelIsOutOfRange(label, TestBranchType)) { + Label done; + tbnz(rt, bit_pos, &done); + b(label); + bind(&done); + } else { + if (!label->IsBound()) { + veneer_pool_.RegisterUnresolvedBranch(GetCursorOffset(), + label, + TestBranchType); + } + tbz(rt, bit_pos, label); + } +} + +void MacroAssembler::Bind(Label* label, BranchTargetIdentifier id) { + VIXL_ASSERT(allow_macro_instructions_); + veneer_pool_.DeleteUnresolvedBranchInfoForLabel(label); + if (id == EmitBTI_none) { + bind(label); + } else { + // Emit this inside an ExactAssemblyScope to ensure there are no extra + // instructions between the bind and the target identifier instruction. + ExactAssemblyScope scope(this, kInstructionSize); + bind(label); + if (id == EmitPACIASP) { + paciasp(); + } else if (id == EmitPACIBSP) { + pacibsp(); + } else { + bti(id); + } + } +} + +// Bind a label to a specified offset from the start of the buffer. 
+void MacroAssembler::BindToOffset(Label* label, ptrdiff_t offset) { + VIXL_ASSERT(allow_macro_instructions_); + veneer_pool_.DeleteUnresolvedBranchInfoForLabel(label); + Assembler::BindToOffset(label, offset); +} + + +void MacroAssembler::And(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, AND); +} + + +void MacroAssembler::Ands(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, ANDS); +} + + +void MacroAssembler::Tst(const Register& rn, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Ands(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Bic(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, BIC); +} + + +void MacroAssembler::Bics(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, BICS); +} + + +void MacroAssembler::Orr(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, ORR); +} + + +void MacroAssembler::Orn(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, ORN); +} + + +void MacroAssembler::Eor(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, EOR); +} + + +void MacroAssembler::Eon(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + LogicalMacro(rd, rn, operand, EON); +} + + +void MacroAssembler::LogicalMacro(const Register& rd, + const Register& rn, + const Operand& operand, + LogicalOp op) { + // The worst case for 
size is logical immediate to sp: + // * up to 4 instructions to materialise the constant + // * 1 instruction to do the operation + // * 1 instruction to move to sp + MacroEmissionCheckScope guard(this); + UseScratchRegisterScope temps(this); + + if (operand.IsImmediate()) { + uint64_t immediate = operand.GetImmediate(); + unsigned reg_size = rd.GetSizeInBits(); + + // If the operation is NOT, invert the operation and immediate. + if ((op & NOT) == NOT) { + op = static_cast(op & ~NOT); + immediate = ~immediate; + } + + // Ignore the top 32 bits of an immediate if we're moving to a W register. + if (rd.Is32Bits()) { + // Check that the top 32 bits are consistent. + VIXL_ASSERT(((immediate >> kWRegSize) == 0) || + ((immediate >> kWRegSize) == 0xffffffff)); + immediate &= kWRegMask; + } + + VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate)); + + // Special cases for all set or all clear immediates. + if (immediate == 0) { + switch (op) { + case AND: + Mov(rd, 0); + return; + case ORR: + VIXL_FALLTHROUGH(); + case EOR: + Mov(rd, rn); + return; + case ANDS: + VIXL_FALLTHROUGH(); + case BICS: + break; + default: + VIXL_UNREACHABLE(); + } + } else if ((rd.Is64Bits() && (immediate == UINT64_C(0xffffffffffffffff))) || + (rd.Is32Bits() && (immediate == UINT64_C(0x00000000ffffffff)))) { + switch (op) { + case AND: + Mov(rd, rn); + return; + case ORR: + Mov(rd, immediate); + return; + case EOR: + Mvn(rd, rn); + return; + case ANDS: + VIXL_FALLTHROUGH(); + case BICS: + break; + default: + VIXL_UNREACHABLE(); + } + } + + unsigned n, imm_s, imm_r; + if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { + // Immediate can be encoded in the instruction. + LogicalImmediate(rd, rn, n, imm_s, imm_r, op); + } else { + // Immediate can't be encoded: synthesize using move immediate. + Register temp = temps.AcquireSameSizeAs(rn); + + // If the left-hand input is the stack pointer, we can't pre-shift the + // immediate, as the encoding won't allow the subsequent post shift. 
+ PreShiftImmMode mode = rn.IsSP() ? kNoShift : kAnyShift; + Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode); + + if (rd.Is(sp)) { + // If rd is the stack pointer we cannot use it as the destination + // register so we use the temp register as an intermediate again. + Logical(temp, rn, imm_operand, op); + Mov(sp, temp); + } else { + Logical(rd, rn, imm_operand, op); + } + } + } else if (operand.IsExtendedRegister()) { + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() <= rd.GetSizeInBits()); + // Add/sub extended supports shift <= 4. We want to support exactly the + // same modes here. + VIXL_ASSERT(operand.GetShiftAmount() <= 4); + VIXL_ASSERT( + operand.GetRegister().Is64Bits() || + ((operand.GetExtend() != UXTX) && (operand.GetExtend() != SXTX))); + + temps.Exclude(operand.GetRegister()); + Register temp = temps.AcquireSameSizeAs(rn); + EmitExtendShift(temp, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + Logical(rd, rn, Operand(temp), op); + } else { + // The operand can be encoded in the instruction. + VIXL_ASSERT(operand.IsShiftedRegister()); + Logical(rd, rn, operand, op); + } +} + + +void MacroAssembler::Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode) { + VIXL_ASSERT(allow_macro_instructions_); + // The worst case for size is mov immediate with up to 4 instructions. + MacroEmissionCheckScope guard(this); + + if (operand.IsImmediate()) { + // Call the macro assembler for generic immediates. + Mov(rd, operand.GetImmediate()); + } else if (operand.IsShiftedRegister() && (operand.GetShiftAmount() != 0)) { + // Emit a shift instruction if moving a shifted register. This operation + // could also be achieved using an orr instruction (like orn used by Mvn), + // but using a shift instruction makes the disassembly clearer. 
+ EmitShift(rd, + operand.GetRegister(), + operand.GetShift(), + operand.GetShiftAmount()); + } else if (operand.IsExtendedRegister()) { + // Emit an extend instruction if moving an extended register. This handles + // extend with post-shift operations, too. + EmitExtendShift(rd, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + } else { + Mov(rd, operand.GetRegister(), discard_mode); + } +} + + +void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { + VIXL_ASSERT(IsUint16(imm)); + int byte1 = (imm & 0xff); + int byte2 = ((imm >> 8) & 0xff); + if (byte1 == byte2) { + movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1); + } else if (byte1 == 0) { + movi(vd, byte2, LSL, 8); + } else if (byte2 == 0) { + movi(vd, byte1); + } else if (byte1 == 0xff) { + mvni(vd, ~byte2 & 0xff, LSL, 8); + } else if (byte2 == 0xff) { + mvni(vd, ~byte1 & 0xff); + } else { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + movz(temp, imm); + dup(vd, temp); + } +} + + +void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { + VIXL_ASSERT(IsUint32(imm)); + + uint8_t bytes[sizeof(imm)]; + memcpy(bytes, &imm, sizeof(imm)); + + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 4; ++i) { + if ((bytes[i] != 0) && (bytes[i] != 0xff)) { + all0orff = false; + break; + } + } + + if (all0orff == true) { + movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm)); + return; + } + } + + // Of the 4 bytes, only one byte is non-zero. + for (int i = 0; i < 4; i++) { + if ((imm & (0xff << (i * 8))) == imm) { + movi(vd, bytes[i], LSL, i * 8); + return; + } + } + + // Of the 4 bytes, only one byte is not 0xff. + for (int i = 0; i < 4; i++) { + uint32_t mask = ~(0xff << (i * 8)); + if ((imm & mask) == mask) { + mvni(vd, ~bytes[i] & 0xff, LSL, i * 8); + return; + } + } + + // Immediate is of the form 0x00MMFFFF. 
+ if ((imm & 0xff00ffff) == 0x0000ffff) { + movi(vd, bytes[2], MSL, 16); + return; + } + + // Immediate is of the form 0x0000MMFF. + if ((imm & 0xffff00ff) == 0x000000ff) { + movi(vd, bytes[1], MSL, 8); + return; + } + + // Immediate is of the form 0xFFMM0000. + if ((imm & 0xff00ffff) == 0xff000000) { + mvni(vd, ~bytes[2] & 0xff, MSL, 16); + return; + } + // Immediate is of the form 0xFFFFMM00. + if ((imm & 0xffff00ff) == 0xffff0000) { + mvni(vd, ~bytes[1] & 0xff, MSL, 8); + return; + } + + // Top and bottom 16-bits are equal. + if (((imm >> 16) & 0xffff) == (imm & 0xffff)) { + Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff); + return; + } + + // Default case. + { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireW(); + Mov(temp, imm); + dup(vd, temp); + } +} + + +void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { + // All bytes are either 0x00 or 0xff. + { + bool all0orff = true; + for (int i = 0; i < 8; ++i) { + int byteval = (imm >> (i * 8)) & 0xff; + if (byteval != 0 && byteval != 0xff) { + all0orff = false; + break; + } + } + if (all0orff == true) { + movi(vd, imm); + return; + } + } + + // Top and bottom 32-bits are equal. + if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) { + Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff); + return; + } + + // Default case. + { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + Mov(temp, imm); + if (vd.Is1D()) { + mov(vd.D(), 0, temp); + } else { + dup(vd.V2D(), temp); + } + } +} + + +void MacroAssembler::Movi(const VRegister& vd, + uint64_t imm, + Shift shift, + int shift_amount) { + VIXL_ASSERT(allow_macro_instructions_); + MacroEmissionCheckScope guard(this); + if (shift_amount != 0 || shift != LSL) { + movi(vd, imm, shift, shift_amount); + } else if (vd.Is8B() || vd.Is16B()) { + // 8-bit immediate. + VIXL_ASSERT(IsUint8(imm)); + movi(vd, imm); + } else if (vd.Is4H() || vd.Is8H()) { + // 16-bit immediate. 
+ Movi16bitHelper(vd, imm); + } else if (vd.Is2S() || vd.Is4S()) { + // 32-bit immediate. + Movi32bitHelper(vd, imm); + } else { + // 64-bit immediate. + Movi64bitHelper(vd, imm); + } +} + + +void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { + // TODO: Move 128-bit values in a more efficient way. + VIXL_ASSERT(vd.Is128Bits()); + UseScratchRegisterScope temps(this); + Movi(vd.V2D(), lo); + Register temp = temps.AcquireX(); + Mov(temp, hi); + Ins(vd.V2D(), 1, temp); +} + + +void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + // The worst case for size is mvn immediate with up to 4 instructions. + MacroEmissionCheckScope guard(this); + + if (operand.IsImmediate()) { + // Call the macro assembler for generic immediates. + Mvn(rd, operand.GetImmediate()); + } else if (operand.IsExtendedRegister()) { + UseScratchRegisterScope temps(this); + temps.Exclude(operand.GetRegister()); + + // Emit two instructions for the extend case. This differs from Mov, as + // the extend and invert can't be achieved in one instruction. + Register temp = temps.AcquireSameSizeAs(rd); + EmitExtendShift(temp, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + mvn(rd, Operand(temp)); + } else { + // Otherwise, register and shifted register cases can be handled by the + // assembler directly, using orn. 
+ mvn(rd, operand); + } +} + + +void MacroAssembler::Mov(const Register& rd, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + MoveImmediateHelper(this, rd, imm); +} + + +void MacroAssembler::Ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0)) { + ConditionalCompareMacro(rn, -operand.GetImmediate(), nzcv, cond, CCMN); + } else { + ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP); + } +} + + +void MacroAssembler::Ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0)) { + ConditionalCompareMacro(rn, -operand.GetImmediate(), nzcv, cond, CCMP); + } else { + ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN); + } +} + + +void MacroAssembler::ConditionalCompareMacro(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op) { + VIXL_ASSERT((cond != al) && (cond != nv)); + // The worst case for size is ccmp immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for ccmp + MacroEmissionCheckScope guard(this); + + if ((operand.IsShiftedRegister() && (operand.GetShiftAmount() == 0)) || + (operand.IsImmediate() && + IsImmConditionalCompare(operand.GetImmediate()))) { + // The immediate can be encoded in the instruction, or the operand is an + // unshifted register: call the assembler. + ConditionalCompare(rn, operand, nzcv, cond, op); + } else { + UseScratchRegisterScope temps(this); + // The operand isn't directly supported by the instruction: perform the + // operation on a temporary register. 
+ Register temp = temps.AcquireSameSizeAs(rn); + Mov(temp, operand); + ConditionalCompare(rn, temp, nzcv, cond, op); + } +} + + +void MacroAssembler::CselHelper(MacroAssembler* masm, + const Register& rd, + Operand left, + Operand right, + Condition cond, + bool* should_synthesise_left, + bool* should_synthesise_right) { + bool emit_code = (masm != NULL); + + VIXL_ASSERT(!emit_code || masm->allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + VIXL_ASSERT(!rd.IsZero() && !rd.IsSP()); + VIXL_ASSERT(left.IsImmediate() || !left.GetRegister().IsSP()); + VIXL_ASSERT(right.IsImmediate() || !right.GetRegister().IsSP()); + + if (should_synthesise_left != NULL) *should_synthesise_left = false; + if (should_synthesise_right != NULL) *should_synthesise_right = false; + + // The worst case for size occurs when the inputs are two non encodable + // constants: + // * up to 4 instructions to materialise the left constant + // * up to 4 instructions to materialise the right constant + // * 1 instruction for csel + EmissionCheckScope guard(masm, 9 * kInstructionSize); + UseScratchRegisterScope temps; + if (masm != NULL) { + temps.Open(masm); + } + + // Try to handle cases where both inputs are immediates. + bool left_is_immediate = left.IsImmediate() || left.IsZero(); + bool right_is_immediate = right.IsImmediate() || right.IsZero(); + if (left_is_immediate && right_is_immediate && + CselSubHelperTwoImmediates(masm, + rd, + left.GetEquivalentImmediate(), + right.GetEquivalentImmediate(), + cond, + should_synthesise_left, + should_synthesise_right)) { + return; + } + + // Handle cases where one of the two inputs is -1, 0, or 1. 
+ bool left_is_small_immediate = + left_is_immediate && ((-1 <= left.GetEquivalentImmediate()) && + (left.GetEquivalentImmediate() <= 1)); + bool right_is_small_immediate = + right_is_immediate && ((-1 <= right.GetEquivalentImmediate()) && + (right.GetEquivalentImmediate() <= 1)); + if (right_is_small_immediate || left_is_small_immediate) { + bool swapped_inputs = false; + if (!right_is_small_immediate) { + std::swap(left, right); + cond = InvertCondition(cond); + swapped_inputs = true; + } + CselSubHelperRightSmallImmediate(masm, + &temps, + rd, + left, + right, + cond, + swapped_inputs ? should_synthesise_right + : should_synthesise_left); + return; + } + + // Otherwise both inputs need to be available in registers. Synthesise them + // if necessary and emit the `csel`. + if (!left.IsPlainRegister()) { + if (emit_code) { + Register temp = temps.AcquireSameSizeAs(rd); + masm->Mov(temp, left); + left = temp; + } + if (should_synthesise_left != NULL) *should_synthesise_left = true; + } + if (!right.IsPlainRegister()) { + if (emit_code) { + Register temp = temps.AcquireSameSizeAs(rd); + masm->Mov(temp, right); + right = temp; + } + if (should_synthesise_right != NULL) *should_synthesise_right = true; + } + if (emit_code) { + VIXL_ASSERT(left.IsPlainRegister() && right.IsPlainRegister()); + if (left.GetRegister().Is(right.GetRegister())) { + masm->Mov(rd, left.GetRegister()); + } else { + masm->csel(rd, left.GetRegister(), right.GetRegister(), cond); + } + } +} + + +bool MacroAssembler::CselSubHelperTwoImmediates(MacroAssembler* masm, + const Register& rd, + int64_t left, + int64_t right, + Condition cond, + bool* should_synthesise_left, + bool* should_synthesise_right) { + bool emit_code = (masm != NULL); + if (should_synthesise_left != NULL) *should_synthesise_left = false; + if (should_synthesise_right != NULL) *should_synthesise_right = false; + + if (left == right) { + if (emit_code) masm->Mov(rd, left); + return true; + } else if (left == -right) { + if 
(should_synthesise_right != NULL) *should_synthesise_right = true; + if (emit_code) { + masm->Mov(rd, right); + masm->Cneg(rd, rd, cond); + } + return true; + } + + if (CselSubHelperTwoOrderedImmediates(masm, rd, left, right, cond)) { + return true; + } else { + std::swap(left, right); + if (CselSubHelperTwoOrderedImmediates(masm, + rd, + left, + right, + InvertCondition(cond))) { + return true; + } + } + + // TODO: Handle more situations. For example handle `csel rd, #5, #6, cond` + // with `cinc`. + return false; +} + + +bool MacroAssembler::CselSubHelperTwoOrderedImmediates(MacroAssembler* masm, + const Register& rd, + int64_t left, + int64_t right, + Condition cond) { + bool emit_code = (masm != NULL); + + if ((left == 1) && (right == 0)) { + if (emit_code) masm->cset(rd, cond); + return true; + } else if ((left == -1) && (right == 0)) { + if (emit_code) masm->csetm(rd, cond); + return true; + } + return false; +} + + +void MacroAssembler::CselSubHelperRightSmallImmediate( + MacroAssembler* masm, + UseScratchRegisterScope* temps, + const Register& rd, + const Operand& left, + const Operand& right, + Condition cond, + bool* should_synthesise_left) { + bool emit_code = (masm != NULL); + VIXL_ASSERT((right.IsImmediate() || right.IsZero()) && + (-1 <= right.GetEquivalentImmediate()) && + (right.GetEquivalentImmediate() <= 1)); + Register left_register; + + if (left.IsPlainRegister()) { + left_register = left.GetRegister(); + } else { + if (emit_code) { + left_register = temps->AcquireSameSizeAs(rd); + masm->Mov(left_register, left); + } + if (should_synthesise_left != NULL) *should_synthesise_left = true; + } + if (emit_code) { + int64_t imm = right.GetEquivalentImmediate(); + Register zr = AppropriateZeroRegFor(rd); + if (imm == 0) { + masm->csel(rd, left_register, zr, cond); + } else if (imm == 1) { + masm->csinc(rd, left_register, zr, cond); + } else { + VIXL_ASSERT(imm == -1); + masm->csinv(rd, left_register, zr, cond); + } + } +} + + +void 
MacroAssembler::Add(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0) && + IsImmAddSub(-operand.GetImmediate())) { + AddSubMacro(rd, rn, -operand.GetImmediate(), S, SUB); + } else { + AddSubMacro(rd, rn, operand, S, ADD); + } +} + + +void MacroAssembler::Adds(const Register& rd, + const Register& rn, + const Operand& operand) { + Add(rd, rn, operand, SetFlags); +} + + +void MacroAssembler::Sub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S) { + VIXL_ASSERT(allow_macro_instructions_); + if (operand.IsImmediate() && (operand.GetImmediate() < 0) && + IsImmAddSub(-operand.GetImmediate())) { + AddSubMacro(rd, rn, -operand.GetImmediate(), S, ADD); + } else { + AddSubMacro(rd, rn, operand, S, SUB); + } +} + + +void MacroAssembler::Subs(const Register& rd, + const Register& rn, + const Operand& operand) { + Sub(rd, rn, operand, SetFlags); +} + + +void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Adds(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Cmp(const Register& rn, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Subs(AppropriateZeroRegFor(rn), rn, operand); +} + + +void MacroAssembler::Fcmp(const FPRegister& fn, + double value, + FPTrapFlags trap) { + VIXL_ASSERT(allow_macro_instructions_); + // The worst case for size is: + // * 1 to materialise the constant, using literal pool if necessary + // * 1 instruction for fcmp{e} + MacroEmissionCheckScope guard(this); + if (value != 0.0) { + UseScratchRegisterScope temps(this); + FPRegister tmp = temps.AcquireSameSizeAs(fn); + Fmov(tmp, value); + FPCompareMacro(fn, tmp, trap); + } else { + FPCompareMacro(fn, value, trap); + } +} + + +void MacroAssembler::Fcmpe(const FPRegister& fn, double value) { + Fcmp(fn, value, EnableTrap); +} + + +void 
MacroAssembler::Fmov(VRegister vd, double imm) {
+  // Move a double immediate into vd (the leading `void` of this definition
+  // is on the preceding patch line). Encodable immediates use a single
+  // `fmov`; other scalar values are loaded through the literal pool, and
+  // vector destinations receive the raw bit pattern via Movi.
+  VIXL_ASSERT(allow_macro_instructions_);
+  // Floating point immediates are loaded through the literal pool.
+  MacroEmissionCheckScope guard(this);
+
+  if (vd.Is1H() || vd.Is4H() || vd.Is8H()) {
+    Fmov(vd, Float16(imm));
+    return;
+  }
+
+  if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+    // S-sized destination: narrow to float and re-dispatch.
+    Fmov(vd, static_cast<float>(imm));
+    return;
+  }
+
+  VIXL_ASSERT(vd.Is1D() || vd.Is2D());
+  if (IsImmFP64(imm)) {
+    fmov(vd, imm);
+  } else {
+    uint64_t rawbits = DoubleToRawbits(imm);
+    if (vd.IsScalar()) {
+      if (rawbits == 0) {
+        // +0.0 is all-zero bits; a move from xzr avoids the literal pool.
+        fmov(vd, xzr);
+      } else {
+        ldr(vd,
+            new Literal<double>(imm,
+                                &literal_pool_,
+                                RawLiteral::kDeletedOnPlacementByPool));
+      }
+    } else {
+      // TODO: consider NEON support for load literal.
+      Movi(vd, rawbits);
+    }
+  }
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, float imm) {
+  // Move a float immediate into vd; mirrors the double overload above.
+  VIXL_ASSERT(allow_macro_instructions_);
+  // Floating point immediates are loaded through the literal pool.
+  MacroEmissionCheckScope guard(this);
+
+  if (vd.Is1H() || vd.Is4H() || vd.Is8H()) {
+    Fmov(vd, Float16(imm));
+    return;
+  }
+
+  if (vd.Is1D() || vd.Is2D()) {
+    // D-sized destination: widen to double and re-dispatch.
+    Fmov(vd, static_cast<double>(imm));
+    return;
+  }
+
+  VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
+  if (IsImmFP32(imm)) {
+    fmov(vd, imm);
+  } else {
+    uint32_t rawbits = FloatToRawbits(imm);
+    if (vd.IsScalar()) {
+      if (rawbits == 0) {
+        fmov(vd, wzr);
+      } else {
+        ldr(vd,
+            new Literal<float>(imm,
+                               &literal_pool_,
+                               RawLiteral::kDeletedOnPlacementByPool));
+      }
+    } else {
+      // TODO: consider NEON support for load literal.
+      Movi(vd, rawbits);
+    }
+  }
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, Float16 imm) {
+  // Move a half-precision immediate into vd. Non-encodable H-sized scalars
+  // synthesise the 16 raw bits with movz + fmov instead of the literal pool.
+  VIXL_ASSERT(allow_macro_instructions_);
+  MacroEmissionCheckScope guard(this);
+
+  if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+    Fmov(vd, FPToFloat(imm, kIgnoreDefaultNaN));
+    return;
+  }
+
+  if (vd.Is1D() || vd.Is2D()) {
+    Fmov(vd, FPToDouble(imm, kIgnoreDefaultNaN));
+    return;
+  }
+
+  VIXL_ASSERT(vd.Is1H() || vd.Is4H() || vd.Is8H());
+  uint16_t rawbits = Float16ToRawbits(imm);
+  if (IsImmFP16(imm)) {
+    fmov(vd, imm);
+  } else {
+    if (vd.IsScalar()) {
+      if (rawbits == 0x0) {
+        fmov(vd, wzr);
+      } else {
+        // We can use movz instead of the literal pool.
+        UseScratchRegisterScope temps(this);
+        Register temp = temps.AcquireW();
+        Mov(temp, rawbits);
+        Fmov(vd, temp);
+      }
+    } else {
+      // TODO: consider NEON support for load literal.
+      Movi(vd, static_cast<uint64_t>(rawbits));
+    }
+  }
+}
+
+
+void MacroAssembler::Neg(const Register& rd, const Operand& operand) {
+  // rd = -operand. Immediates fold the negation into the Mov; register
+  // operands subtract from the appropriate zero register.
+  VIXL_ASSERT(allow_macro_instructions_);
+  if (operand.IsImmediate()) {
+    Mov(rd, -operand.GetImmediate());
+  } else {
+    Sub(rd, AppropriateZeroRegFor(rd), operand);
+  }
+}
+
+
+void MacroAssembler::Negs(const Register& rd, const Operand& operand) {
+  // Flag-setting variant of Neg.
+  VIXL_ASSERT(allow_macro_instructions_);
+  Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+                                              int64_t imm) {
+  // Returns true iff imm could be emitted as a single move instruction.
+  return OneInstrMoveImmediateHelper(this, dst, imm);
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+                                                  int64_t imm,
+                                                  PreShiftImmMode mode) {
+  int reg_size = dst.GetSizeInBits();
+
+  // Encode the immediate in a single move instruction, if possible.
+  if (TryOneInstrMoveImmediate(dst, imm)) {
+    // The move was successful; nothing to do here.
+  } else {
+    // Pre-shift the immediate to the least-significant bits of the register.
+ int shift_low = CountTrailingZeros(imm, reg_size); + if (mode == kLimitShiftForSP) { + // When applied to the stack pointer, the subsequent arithmetic operation + // can use the extend form to shift left by a maximum of four bits. Right + // shifts are not allowed, so we filter them out later before the new + // immediate is tested. + shift_low = std::min(shift_low, 4); + } + int64_t imm_low = imm >> shift_low; + + // Pre-shift the immediate to the most-significant bits of the register, + // inserting set bits in the least-significant bits. + int shift_high = CountLeadingZeros(imm, reg_size); + int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1); + + if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) { + // The new immediate has been moved into the destination's low bits: + // return a new leftward-shifting operand. + return Operand(dst, LSL, shift_low); + } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) { + // The new immediate has been moved into the destination's high bits: + // return a new rightward-shifting operand. + return Operand(dst, LSR, shift_high); + } else { + Mov(dst, imm); + } + } + return Operand(dst); +} + + +void MacroAssembler::Move(const GenericOperand& dst, + const GenericOperand& src) { + if (dst.Equals(src)) { + return; + } + + VIXL_ASSERT(dst.IsValid() && src.IsValid()); + + // The sizes of the operands must match exactly. 
+ VIXL_ASSERT(dst.GetSizeInBits() == src.GetSizeInBits()); + VIXL_ASSERT(dst.GetSizeInBits() <= kXRegSize); + int operand_size = static_cast(dst.GetSizeInBits()); + + if (dst.IsCPURegister() && src.IsCPURegister()) { + CPURegister dst_reg = dst.GetCPURegister(); + CPURegister src_reg = src.GetCPURegister(); + if (dst_reg.IsRegister() && src_reg.IsRegister()) { + Mov(Register(dst_reg), Register(src_reg)); + } else if (dst_reg.IsVRegister() && src_reg.IsVRegister()) { + Fmov(VRegister(dst_reg), VRegister(src_reg)); + } else { + if (dst_reg.IsRegister()) { + Fmov(Register(dst_reg), VRegister(src_reg)); + } else { + Fmov(VRegister(dst_reg), Register(src_reg)); + } + } + return; + } + + if (dst.IsMemOperand() && src.IsMemOperand()) { + UseScratchRegisterScope temps(this); + CPURegister temp = temps.AcquireCPURegisterOfSize(operand_size); + Ldr(temp, src.GetMemOperand()); + Str(temp, dst.GetMemOperand()); + return; + } + + if (dst.IsCPURegister()) { + Ldr(dst.GetCPURegister(), src.GetMemOperand()); + } else { + Str(src.GetCPURegister(), dst.GetMemOperand()); + } +} + + +void MacroAssembler::ComputeAddress(const Register& dst, + const MemOperand& mem_op) { + // We cannot handle pre-indexing or post-indexing. 
+ VIXL_ASSERT(mem_op.GetAddrMode() == Offset); + Register base = mem_op.GetBaseRegister(); + if (mem_op.IsImmediateOffset()) { + Add(dst, base, mem_op.GetOffset()); + } else { + VIXL_ASSERT(mem_op.IsRegisterOffset()); + Register reg_offset = mem_op.GetRegisterOffset(); + Shift shift = mem_op.GetShift(); + Extend extend = mem_op.GetExtend(); + if (shift == NO_SHIFT) { + VIXL_ASSERT(extend != NO_EXTEND); + Add(dst, base, Operand(reg_offset, extend, mem_op.GetShiftAmount())); + } else { + VIXL_ASSERT(extend == NO_EXTEND); + Add(dst, base, Operand(reg_offset, shift, mem_op.GetShiftAmount())); + } + } +} + + +void MacroAssembler::AddSubMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op) { + // Worst case is add/sub immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for add/sub + MacroEmissionCheckScope guard(this); + + if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && + (S == LeaveFlags)) { + // The instruction would be a nop. Avoid generating useless code. + return; + } + + if ((operand.IsImmediate() && !IsImmAddSub(operand.GetImmediate())) || + (rn.IsZero() && !operand.IsShiftedRegister()) || + (operand.IsShiftedRegister() && (operand.GetShift() == ROR))) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(rn); + if (operand.IsImmediate()) { + PreShiftImmMode mode = kAnyShift; + + // If the destination or source register is the stack pointer, we can + // only pre-shift the immediate right by values supported in the add/sub + // extend encoding. + if (rd.IsSP()) { + // If the destination is SP and flags will be set, we can't pre-shift + // the immediate at all. + mode = (S == SetFlags) ? 
kNoShift : kLimitShiftForSP; + } else if (rn.IsSP()) { + mode = kLimitShiftForSP; + } + + Operand imm_operand = + MoveImmediateForShiftedOp(temp, operand.GetImmediate(), mode); + AddSub(rd, rn, imm_operand, S, op); + } else { + Mov(temp, operand); + AddSub(rd, rn, temp, S, op); + } + } else { + AddSub(rd, rn, operand, S, op); + } +} + + +void MacroAssembler::Adc(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC); +} + + +void MacroAssembler::Adcs(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC); +} + + +void MacroAssembler::Sbc(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC); +} + + +void MacroAssembler::Sbcs(const Register& rd, + const Register& rn, + const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC); +} + + +void MacroAssembler::Ngc(const Register& rd, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Register zr = AppropriateZeroRegFor(rd); + Sbc(rd, zr, operand); +} + + +void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) { + VIXL_ASSERT(allow_macro_instructions_); + Register zr = AppropriateZeroRegFor(rd); + Sbcs(rd, zr, operand); +} + + +void MacroAssembler::AddSubWithCarryMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op) { + VIXL_ASSERT(rd.GetSizeInBits() == rn.GetSizeInBits()); + // Worst case is addc/subc immediate: + // * up to 4 instructions to materialise the constant + // * 1 instruction for add/sub + MacroEmissionCheckScope guard(this); + UseScratchRegisterScope temps(this); + + if (operand.IsImmediate() || + 
(operand.IsShiftedRegister() && (operand.GetShift() == ROR))) { + // Add/sub with carry (immediate or ROR shifted register.) + Register temp = temps.AcquireSameSizeAs(rn); + Mov(temp, operand); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else if (operand.IsShiftedRegister() && (operand.GetShiftAmount() != 0)) { + // Add/sub with carry (shifted register). + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() == rd.GetSizeInBits()); + VIXL_ASSERT(operand.GetShift() != ROR); + VIXL_ASSERT( + IsUintN(rd.GetSizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2, + operand.GetShiftAmount())); + temps.Exclude(operand.GetRegister()); + Register temp = temps.AcquireSameSizeAs(rn); + EmitShift(temp, + operand.GetRegister(), + operand.GetShift(), + operand.GetShiftAmount()); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else if (operand.IsExtendedRegister()) { + // Add/sub with carry (extended register). + VIXL_ASSERT(operand.GetRegister().GetSizeInBits() <= rd.GetSizeInBits()); + // Add/sub extended supports a shift <= 4. We want to support exactly the + // same modes. + VIXL_ASSERT(operand.GetShiftAmount() <= 4); + VIXL_ASSERT( + operand.GetRegister().Is64Bits() || + ((operand.GetExtend() != UXTX) && (operand.GetExtend() != SXTX))); + temps.Exclude(operand.GetRegister()); + Register temp = temps.AcquireSameSizeAs(rn); + EmitExtendShift(temp, + operand.GetRegister(), + operand.GetExtend(), + operand.GetShiftAmount()); + AddSubWithCarry(rd, rn, Operand(temp), S, op); + } else { + // The addressing mode is directly supported by the instruction. 
+ AddSubWithCarry(rd, rn, operand, S, op); + } +} + + +void MacroAssembler::Rmif(const Register& xn, + unsigned shift, + StatusFlags flags) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + rmif(xn, shift, flags); +} + + +void MacroAssembler::Setf8(const Register& wn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setf8(wn); +} + + +void MacroAssembler::Setf16(const Register& wn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + setf16(wn); +} + + +#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ + void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + LoadStoreMacro(REG, addr, OP); \ + } +LS_MACRO_LIST(DEFINE_FUNCTION) +#undef DEFINE_FUNCTION + + +void MacroAssembler::LoadStoreMacro(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op) { + // Worst case is ldr/str pre/post index: + // * 1 instruction for ldr/str + // * up to 4 instructions to materialise the constant + // * 1 instruction to update the base + MacroEmissionCheckScope guard(this); + + int64_t offset = addr.GetOffset(); + unsigned access_size = CalcLSDataSize(op); + + // Check if an immediate offset fits in the immediate field of the + // appropriate instruction. If not, emit two instructions to perform + // the operation. + if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, access_size) && + !IsImmLSUnscaled(offset)) { + // Immediate offset that can't be encoded using unsigned or unscaled + // addressing modes. + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(addr.GetBaseRegister()); + Mov(temp, addr.GetOffset()); + LoadStore(rt, MemOperand(addr.GetBaseRegister(), temp), op); + } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { + // Post-index beyond unscaled addressing range. 
+ LoadStore(rt, MemOperand(addr.GetBaseRegister()), op); + Add(addr.GetBaseRegister(), addr.GetBaseRegister(), Operand(offset)); + } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { + // Pre-index beyond unscaled addressing range. + Add(addr.GetBaseRegister(), addr.GetBaseRegister(), Operand(offset)); + LoadStore(rt, MemOperand(addr.GetBaseRegister()), op); + } else { + // Encodable in one load/store instruction. + LoadStore(rt, addr, op); + } +} + + +#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ + void MacroAssembler::FN(const REGTYPE REG, \ + const REGTYPE REG2, \ + const MemOperand& addr) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + LoadStorePairMacro(REG, REG2, addr, OP); \ + } +LSPAIR_MACRO_LIST(DEFINE_FUNCTION) +#undef DEFINE_FUNCTION + +void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op) { + // TODO(all): Should we support register offset for load-store-pair? + VIXL_ASSERT(!addr.IsRegisterOffset()); + // Worst case is ldp/stp immediate: + // * 1 instruction for ldp/stp + // * up to 4 instructions to materialise the constant + // * 1 instruction to update the base + MacroEmissionCheckScope guard(this); + + int64_t offset = addr.GetOffset(); + unsigned access_size = CalcLSPairDataSize(op); + + // Check if the offset fits in the immediate field of the appropriate + // instruction. If not, emit two instructions to perform the operation. + if (IsImmLSPair(offset, access_size)) { + // Encodable in one load/store pair instruction. 
+ LoadStorePair(rt, rt2, addr, op); + } else { + Register base = addr.GetBaseRegister(); + if (addr.IsImmediateOffset()) { + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(base); + Add(temp, base, offset); + LoadStorePair(rt, rt2, MemOperand(temp), op); + } else if (addr.IsPostIndex()) { + LoadStorePair(rt, rt2, MemOperand(base), op); + Add(base, base, offset); + } else { + VIXL_ASSERT(addr.IsPreIndex()); + Add(base, base, offset); + LoadStorePair(rt, rt2, MemOperand(base), op); + } + } +} + + +void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) { + MacroEmissionCheckScope guard(this); + + // There are no pre- or post-index modes for prfm. + VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset()); + + // The access size is implicitly 8 bytes for all prefetch operations. + unsigned size = kXRegSizeInBytesLog2; + + // Check if an immediate offset fits in the immediate field of the + // appropriate instruction. If not, emit two instructions to perform + // the operation. + if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.GetOffset(), size) && + !IsImmLSUnscaled(addr.GetOffset())) { + // Immediate offset that can't be encoded using unsigned or unscaled + // addressing modes. + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireSameSizeAs(addr.GetBaseRegister()); + Mov(temp, addr.GetOffset()); + Prefetch(op, MemOperand(addr.GetBaseRegister(), temp)); + } else { + // Simple register-offsets are encodable in one instruction. 
+ Prefetch(op, addr); + } +} + + +void MacroAssembler::Push(const CPURegister& src0, + const CPURegister& src1, + const CPURegister& src2, + const CPURegister& src3) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); + VIXL_ASSERT(src0.IsValid()); + + int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); + int size = src0.GetSizeInBytes(); + + PrepareForPush(count, size); + PushHelper(count, size, src0, src1, src2, src3); +} + + +void MacroAssembler::Pop(const CPURegister& dst0, + const CPURegister& dst1, + const CPURegister& dst2, + const CPURegister& dst3) { + // It is not valid to pop into the same register more than once in one + // instruction, not even into the zero register. + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(dst0.IsValid()); + + int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); + int size = dst0.GetSizeInBytes(); + + PrepareForPop(count, size); + PopHelper(count, size, dst0, dst1, dst2, dst3); +} + + +void MacroAssembler::PushCPURegList(CPURegList registers) { + VIXL_ASSERT(!registers.Overlaps(*GetScratchRegisterList())); + VIXL_ASSERT(!registers.Overlaps(*GetScratchFPRegisterList())); + VIXL_ASSERT(allow_macro_instructions_); + + int reg_size = registers.GetRegisterSizeInBytes(); + PrepareForPush(registers.GetCount(), reg_size); + + // Bump the stack pointer and store two registers at the bottom. 
+ int size = registers.GetTotalSizeInBytes(); + const CPURegister& bottom_0 = registers.PopLowestIndex(); + const CPURegister& bottom_1 = registers.PopLowestIndex(); + if (bottom_0.IsValid() && bottom_1.IsValid()) { + Stp(bottom_0, bottom_1, MemOperand(StackPointer(), -size, PreIndex)); + } else if (bottom_0.IsValid()) { + Str(bottom_0, MemOperand(StackPointer(), -size, PreIndex)); + } + + int offset = 2 * reg_size; + while (!registers.IsEmpty()) { + const CPURegister& src0 = registers.PopLowestIndex(); + const CPURegister& src1 = registers.PopLowestIndex(); + if (src1.IsValid()) { + Stp(src0, src1, MemOperand(StackPointer(), offset)); + } else { + Str(src0, MemOperand(StackPointer(), offset)); + } + offset += 2 * reg_size; + } +} + + +void MacroAssembler::PopCPURegList(CPURegList registers) { + VIXL_ASSERT(!registers.Overlaps(*GetScratchRegisterList())); + VIXL_ASSERT(!registers.Overlaps(*GetScratchFPRegisterList())); + VIXL_ASSERT(allow_macro_instructions_); + + int reg_size = registers.GetRegisterSizeInBytes(); + PrepareForPop(registers.GetCount(), reg_size); + + + int size = registers.GetTotalSizeInBytes(); + const CPURegister& bottom_0 = registers.PopLowestIndex(); + const CPURegister& bottom_1 = registers.PopLowestIndex(); + + int offset = 2 * reg_size; + while (!registers.IsEmpty()) { + const CPURegister& dst0 = registers.PopLowestIndex(); + const CPURegister& dst1 = registers.PopLowestIndex(); + if (dst1.IsValid()) { + Ldp(dst0, dst1, MemOperand(StackPointer(), offset)); + } else { + Ldr(dst0, MemOperand(StackPointer(), offset)); + } + offset += 2 * reg_size; + } + + // Load the two registers at the bottom and drop the stack pointer. 
+ if (bottom_0.IsValid() && bottom_1.IsValid()) { + Ldp(bottom_0, bottom_1, MemOperand(StackPointer(), size, PostIndex)); + } else if (bottom_0.IsValid()) { + Ldr(bottom_0, MemOperand(StackPointer(), size, PostIndex)); + } +} + + +void MacroAssembler::PushMultipleTimes(int count, Register src) { + VIXL_ASSERT(allow_macro_instructions_); + int size = src.GetSizeInBytes(); + + PrepareForPush(count, size); + // Push up to four registers at a time if possible because if the current + // stack pointer is sp and the register size is 32, registers must be pushed + // in blocks of four in order to maintain the 16-byte alignment for sp. + while (count >= 4) { + PushHelper(4, size, src, src, src, src); + count -= 4; + } + if (count >= 2) { + PushHelper(2, size, src, src, NoReg, NoReg); + count -= 2; + } + if (count == 1) { + PushHelper(1, size, src, NoReg, NoReg, NoReg); + count -= 1; + } + VIXL_ASSERT(count == 0); +} + + +void MacroAssembler::PushHelper(int count, + int size, + const CPURegister& src0, + const CPURegister& src1, + const CPURegister& src2, + const CPURegister& src3) { + // Ensure that we don't unintentionally modify scratch or debug registers. + // Worst case for size is 2 stp. + ExactAssemblyScope scope(this, + 2 * kInstructionSize, + ExactAssemblyScope::kMaximumSize); + + VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); + VIXL_ASSERT(size == src0.GetSizeInBytes()); + + // When pushing multiple registers, the store order is chosen such that + // Push(a, b) is equivalent to Push(a) followed by Push(b). 
+ switch (count) { + case 1: + VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone()); + str(src0, MemOperand(StackPointer(), -1 * size, PreIndex)); + break; + case 2: + VIXL_ASSERT(src2.IsNone() && src3.IsNone()); + stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex)); + break; + case 3: + VIXL_ASSERT(src3.IsNone()); + stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex)); + str(src0, MemOperand(StackPointer(), 2 * size)); + break; + case 4: + // Skip over 4 * size, then fill in the gap. This allows four W registers + // to be pushed using sp, whilst maintaining 16-byte alignment for sp at + // all times. + stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex)); + stp(src1, src0, MemOperand(StackPointer(), 2 * size)); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void MacroAssembler::PopHelper(int count, + int size, + const CPURegister& dst0, + const CPURegister& dst1, + const CPURegister& dst2, + const CPURegister& dst3) { + // Ensure that we don't unintentionally modify scratch or debug registers. + // Worst case for size is 2 ldp. + ExactAssemblyScope scope(this, + 2 * kInstructionSize, + ExactAssemblyScope::kMaximumSize); + + VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); + VIXL_ASSERT(size == dst0.GetSizeInBytes()); + + // When popping multiple registers, the load order is chosen such that + // Pop(a, b) is equivalent to Pop(a) followed by Pop(b). 
+ switch (count) { + case 1: + VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); + ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex)); + break; + case 2: + VIXL_ASSERT(dst2.IsNone() && dst3.IsNone()); + ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex)); + break; + case 3: + VIXL_ASSERT(dst3.IsNone()); + ldr(dst2, MemOperand(StackPointer(), 2 * size)); + ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex)); + break; + case 4: + // Load the higher addresses first, then load the lower addresses and skip + // the whole block in the second instruction. This allows four W registers + // to be popped using sp, whilst maintaining 16-byte alignment for sp at + // all times. + ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); + ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void MacroAssembler::PrepareForPush(int count, int size) { + if (sp.Is(StackPointer())) { + // If the current stack pointer is sp, then it must be aligned to 16 bytes + // on entry and the total size of the specified registers must also be a + // multiple of 16 bytes. + VIXL_ASSERT((count * size) % 16 == 0); + } else { + // Even if the current stack pointer is not the system stack pointer (sp), + // the system stack pointer will still be modified in order to comply with + // ABI rules about accessing memory below the system stack pointer. + BumpSystemStackPointer(count * size); + } +} + + +void MacroAssembler::PrepareForPop(int count, int size) { + USE(count, size); + if (sp.Is(StackPointer())) { + // If the current stack pointer is sp, then it must be aligned to 16 bytes + // on entry and the total size of the specified registers must also be a + // multiple of 16 bytes. 
+ VIXL_ASSERT((count * size) % 16 == 0);
+  }
+}
+
+void MacroAssembler::Poke(const Register& src, const Operand& offset) {
+  // Store `src` at StackPointer() + offset. An immediate offset must be
+  // non-negative.
+  VIXL_ASSERT(allow_macro_instructions_);
+  VIXL_ASSERT(!offset.IsImmediate() || (offset.GetImmediate() >= 0));
+
+  Str(src, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
+  // Load `dst` from StackPointer() + offset. An immediate offset must be
+  // non-negative.
+  VIXL_ASSERT(allow_macro_instructions_);
+  VIXL_ASSERT(!offset.IsImmediate() || (offset.GetImmediate() >= 0));
+
+  Ldr(dst, MemOperand(StackPointer(), offset));
+}
+
+
+void MacroAssembler::Claim(const Operand& size) {
+  // Grow the stack by `size` bytes. When the current stack pointer is sp,
+  // an immediate size must keep the mandatory 16-byte alignment.
+  VIXL_ASSERT(allow_macro_instructions_);
+
+  if (size.IsZero()) return;
+
+  if (size.IsImmediate()) {
+    VIXL_ASSERT(size.GetImmediate() > 0);
+    if (sp.Is(StackPointer())) VIXL_ASSERT((size.GetImmediate() % 16) == 0);
+  }
+
+  // Keep the system stack pointer below any memory we are about to use.
+  if (!sp.Is(StackPointer())) BumpSystemStackPointer(size);
+
+  Sub(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::Drop(const Operand& size) {
+  // Shrink the stack by `size` bytes; mirror image of Claim, without the
+  // system-stack-pointer bump.
+  VIXL_ASSERT(allow_macro_instructions_);
+
+  if (size.IsZero()) return;
+
+  if (size.IsImmediate()) {
+    VIXL_ASSERT(size.GetImmediate() > 0);
+    if (sp.Is(StackPointer())) VIXL_ASSERT((size.GetImmediate() % 16) == 0);
+  }
+
+  Add(StackPointer(), StackPointer(), size);
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+  // Ensure that the macro-assembler doesn't use any scratch registers.
+  // 10 stp will be emitted.
+  // TODO(all): Should we use GetCalleeSaved and SavedFP.
+  ExactAssemblyScope scope(this, 10 * kInstructionSize);
+
+  // This method must not be called unless the current stack pointer is sp.
+ VIXL_ASSERT(sp.Is(StackPointer())); + + MemOperand tos(sp, -2 * static_cast(kXRegSizeInBytes), PreIndex); + + stp(x29, x30, tos); + stp(x27, x28, tos); + stp(x25, x26, tos); + stp(x23, x24, tos); + stp(x21, x22, tos); + stp(x19, x20, tos); + + stp(d14, d15, tos); + stp(d12, d13, tos); + stp(d10, d11, tos); + stp(d8, d9, tos); +} + + +void MacroAssembler::PopCalleeSavedRegisters() { + // Ensure that the macro-assembler doesn't use any scratch registers. + // 10 ldp will be emitted. + // TODO(all): Should we use GetCalleeSaved and SavedFP. + ExactAssemblyScope scope(this, 10 * kInstructionSize); + + // This method must not be called unless the current stack pointer is sp. + VIXL_ASSERT(sp.Is(StackPointer())); + + MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex); + + ldp(d8, d9, tos); + ldp(d10, d11, tos); + ldp(d12, d13, tos); + ldp(d14, d15, tos); + + ldp(x19, x20, tos); + ldp(x21, x22, tos); + ldp(x23, x24, tos); + ldp(x25, x26, tos); + ldp(x27, x28, tos); + ldp(x29, x30, tos); +} + +void MacroAssembler::LoadCPURegList(CPURegList registers, + const MemOperand& src) { + LoadStoreCPURegListHelper(kLoad, registers, src); +} + +void MacroAssembler::StoreCPURegList(CPURegList registers, + const MemOperand& dst) { + LoadStoreCPURegListHelper(kStore, registers, dst); +} + + +void MacroAssembler::LoadStoreCPURegListHelper(LoadStoreCPURegListAction op, + CPURegList registers, + const MemOperand& mem) { + // We do not handle pre-indexing or post-indexing. 
+ VIXL_ASSERT(!(mem.IsPreIndex() || mem.IsPostIndex())); + VIXL_ASSERT(!registers.Overlaps(tmp_list_)); + VIXL_ASSERT(!registers.Overlaps(fptmp_list_)); + VIXL_ASSERT(!registers.IncludesAliasOf(sp)); + + UseScratchRegisterScope temps(this); + + MemOperand loc = BaseMemOperandForLoadStoreCPURegList(registers, mem, &temps); + const int reg_size = registers.GetRegisterSizeInBytes(); + + VIXL_ASSERT(IsPowerOf2(reg_size)); + + // Since we are operating on register pairs, we would like to align on double + // the standard size; on the other hand, we don't want to insert an extra + // operation, which will happen if the number of registers is even. Note that + // the alignment of the base pointer is unknown here, but we assume that it + // is more likely to be aligned. + if (((loc.GetOffset() & (2 * reg_size - 1)) != 0) && + ((registers.GetCount() % 2) != 0)) { + if (op == kStore) { + Str(registers.PopLowestIndex(), loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldr(registers.PopLowestIndex(), loc); + } + loc.AddOffset(reg_size); + } + while (registers.GetCount() >= 2) { + const CPURegister& dst0 = registers.PopLowestIndex(); + const CPURegister& dst1 = registers.PopLowestIndex(); + if (op == kStore) { + Stp(dst0, dst1, loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldp(dst0, dst1, loc); + } + loc.AddOffset(2 * reg_size); + } + if (!registers.IsEmpty()) { + if (op == kStore) { + Str(registers.PopLowestIndex(), loc); + } else { + VIXL_ASSERT(op == kLoad); + Ldr(registers.PopLowestIndex(), loc); + } + } +} + +MemOperand MacroAssembler::BaseMemOperandForLoadStoreCPURegList( + const CPURegList& registers, + const MemOperand& mem, + UseScratchRegisterScope* scratch_scope) { + // If necessary, pre-compute the base address for the accesses. 
+ if (mem.IsRegisterOffset()) { + Register reg_base = scratch_scope->AcquireX(); + ComputeAddress(reg_base, mem); + return MemOperand(reg_base); + + } else if (mem.IsImmediateOffset()) { + int reg_size = registers.GetRegisterSizeInBytes(); + int total_size = registers.GetTotalSizeInBytes(); + int64_t min_offset = mem.GetOffset(); + int64_t max_offset = + mem.GetOffset() + std::max(0, total_size - 2 * reg_size); + if ((registers.GetCount() >= 2) && + (!Assembler::IsImmLSPair(min_offset, WhichPowerOf2(reg_size)) || + !Assembler::IsImmLSPair(max_offset, WhichPowerOf2(reg_size)))) { + Register reg_base = scratch_scope->AcquireX(); + ComputeAddress(reg_base, mem); + return MemOperand(reg_base); + } + } + + return mem; +} + +void MacroAssembler::BumpSystemStackPointer(const Operand& space) { + VIXL_ASSERT(!sp.Is(StackPointer())); + // TODO: Several callers rely on this not using scratch registers, so we use + // the assembler directly here. However, this means that large immediate + // values of 'space' cannot be handled. + ExactAssemblyScope scope(this, kInstructionSize); + sub(sp, StackPointer(), space); +} + + +// TODO(all): Fix printf for NEON registers, and resolve whether we should be +// using FPRegister or VRegister here. + +// This is the main Printf implementation. All callee-saved registers are +// preserved, but NZCV and the caller-saved registers may be clobbered. +void MacroAssembler::PrintfNoPreserve(const char* format, + const CPURegister& arg0, + const CPURegister& arg1, + const CPURegister& arg2, + const CPURegister& arg3) { + // We cannot handle a caller-saved stack pointer. It doesn't make much sense + // in most cases anyway, so this restriction shouldn't be too serious. + VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer())); + + // The provided arguments, and their proper PCS registers. 
+ CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3}; + CPURegister pcs[kPrintfMaxArgCount]; + + int arg_count = kPrintfMaxArgCount; + + // The PCS varargs registers for printf. Note that x0 is used for the printf + // format string. + static const CPURegList kPCSVarargs = + CPURegList(CPURegister::kRegister, kXRegSize, 1, arg_count); + static const CPURegList kPCSVarargsFP = + CPURegList(CPURegister::kVRegister, kDRegSize, 0, arg_count - 1); + + // We can use caller-saved registers as scratch values, except for the + // arguments and the PCS registers where they might need to go. + UseScratchRegisterScope temps(this); + temps.Include(kCallerSaved); + temps.Include(kCallerSavedV); + temps.Exclude(kPCSVarargs); + temps.Exclude(kPCSVarargsFP); + temps.Exclude(arg0, arg1, arg2, arg3); + + // Copies of the arg lists that we can iterate through. + CPURegList pcs_varargs = kPCSVarargs; + CPURegList pcs_varargs_fp = kPCSVarargsFP; + + // Place the arguments. There are lots of clever tricks and optimizations we + // could use here, but Printf is a debug tool so instead we just try to keep + // it simple: Move each input that isn't already in the right place to a + // scratch register, then move everything back. + for (unsigned i = 0; i < kPrintfMaxArgCount; i++) { + // Work out the proper PCS register for this argument. + if (args[i].IsRegister()) { + pcs[i] = pcs_varargs.PopLowestIndex().X(); + // We might only need a W register here. We need to know the size of the + // argument so we can properly encode it for the simulator call. + if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); + } else if (args[i].IsVRegister()) { + // In C, floats are always cast to doubles for varargs calls. + pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); + } else { + VIXL_ASSERT(args[i].IsNone()); + arg_count = i; + break; + } + + // If the argument is already in the right place, leave it where it is. 
+ if (args[i].Aliases(pcs[i])) continue; + + // Otherwise, if the argument is in a PCS argument register, allocate an + // appropriate scratch register and then move it out of the way. + if (kPCSVarargs.IncludesAliasOf(args[i]) || + kPCSVarargsFP.IncludesAliasOf(args[i])) { + if (args[i].IsRegister()) { + Register old_arg = Register(args[i]); + Register new_arg = temps.AcquireSameSizeAs(old_arg); + Mov(new_arg, old_arg); + args[i] = new_arg; + } else { + FPRegister old_arg = FPRegister(args[i]); + FPRegister new_arg = temps.AcquireSameSizeAs(old_arg); + Fmov(new_arg, old_arg); + args[i] = new_arg; + } + } + } + + // Do a second pass to move values into their final positions and perform any + // conversions that may be required. + for (int i = 0; i < arg_count; i++) { + VIXL_ASSERT(pcs[i].GetType() == args[i].GetType()); + if (pcs[i].IsRegister()) { + Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg); + } else { + VIXL_ASSERT(pcs[i].IsVRegister()); + if (pcs[i].GetSizeInBits() == args[i].GetSizeInBits()) { + Fmov(FPRegister(pcs[i]), FPRegister(args[i])); + } else { + Fcvt(FPRegister(pcs[i]), FPRegister(args[i])); + } + } + } + + // Load the format string into x0, as per the procedure-call standard. + // + // To make the code as portable as possible, the format string is encoded + // directly in the instruction stream. It might be cleaner to encode it in a + // literal pool, but since Printf is usually used for debugging, it is + // beneficial for it to be minimally dependent on other features. + temps.Exclude(x0); + Label format_address; + Adr(x0, &format_address); + + // Emit the format string directly in the instruction stream. 
+ { + BlockPoolsScope scope(this); + // Data emitted: + // branch + // strlen(format) + 1 (includes null termination) + // padding to next instruction + // unreachable + EmissionCheckScope guard(this, + AlignUp(strlen(format) + 1, kInstructionSize) + + 2 * kInstructionSize); + Label after_data; + B(&after_data); + Bind(&format_address); + EmitString(format); + Unreachable(); + Bind(&after_data); + } + + // We don't pass any arguments on the stack, but we still need to align the C + // stack pointer to a 16-byte boundary for PCS compliance. + if (!sp.Is(StackPointer())) { + Bic(sp, StackPointer(), 0xf); + } + + // Actually call printf. This part needs special handling for the simulator, + // since the system printf function will use a different instruction set and + // the procedure-call standard will not be compatible. + if (generate_simulator_code_) { + ExactAssemblyScope scope(this, kPrintfLength); + hlt(kPrintfOpcode); + dc32(arg_count); // kPrintfArgCountOffset + + // Determine the argument pattern. + uint32_t arg_pattern_list = 0; + for (int i = 0; i < arg_count; i++) { + uint32_t arg_pattern; + if (pcs[i].IsRegister()) { + arg_pattern = pcs[i].Is32Bits() ? kPrintfArgW : kPrintfArgX; + } else { + VIXL_ASSERT(pcs[i].Is64Bits()); + arg_pattern = kPrintfArgD; + } + VIXL_ASSERT(arg_pattern < (1 << kPrintfArgPatternBits)); + arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i)); + } + dc32(arg_pattern_list); // kPrintfArgPatternListOffset + } else { + Register tmp = temps.AcquireX(); + Mov(tmp, reinterpret_cast(printf)); + Blr(tmp); + } +} + + +void MacroAssembler::Printf(const char* format, + CPURegister arg0, + CPURegister arg1, + CPURegister arg2, + CPURegister arg3) { + // We can only print sp if it is the current stack pointer. 
+ if (!sp.Is(StackPointer())) { + VIXL_ASSERT(!sp.Aliases(arg0)); + VIXL_ASSERT(!sp.Aliases(arg1)); + VIXL_ASSERT(!sp.Aliases(arg2)); + VIXL_ASSERT(!sp.Aliases(arg3)); + } + + // Make sure that the macro assembler doesn't try to use any of our arguments + // as scratch registers. + UseScratchRegisterScope exclude_all(this); + exclude_all.ExcludeAll(); + + // Preserve all caller-saved registers as well as NZCV. + // If sp is the stack pointer, PushCPURegList asserts that the size of each + // list is a multiple of 16 bytes. + PushCPURegList(kCallerSaved); + PushCPURegList(kCallerSavedV); + + { + UseScratchRegisterScope temps(this); + // We can use caller-saved registers as scratch values (except for argN). + temps.Include(kCallerSaved); + temps.Include(kCallerSavedV); + temps.Exclude(arg0, arg1, arg2, arg3); + + // If any of the arguments are the current stack pointer, allocate a new + // register for them, and adjust the value to compensate for pushing the + // caller-saved registers. + bool arg0_sp = StackPointer().Aliases(arg0); + bool arg1_sp = StackPointer().Aliases(arg1); + bool arg2_sp = StackPointer().Aliases(arg2); + bool arg3_sp = StackPointer().Aliases(arg3); + if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) { + // Allocate a register to hold the original stack pointer value, to pass + // to PrintfNoPreserve as an argument. + Register arg_sp = temps.AcquireX(); + Add(arg_sp, + StackPointer(), + kCallerSaved.GetTotalSizeInBytes() + + kCallerSavedV.GetTotalSizeInBytes()); + if (arg0_sp) arg0 = Register(arg_sp.GetCode(), arg0.GetSizeInBits()); + if (arg1_sp) arg1 = Register(arg_sp.GetCode(), arg1.GetSizeInBits()); + if (arg2_sp) arg2 = Register(arg_sp.GetCode(), arg2.GetSizeInBits()); + if (arg3_sp) arg3 = Register(arg_sp.GetCode(), arg3.GetSizeInBits()); + } + + // Preserve NZCV. + Register tmp = temps.AcquireX(); + Mrs(tmp, NZCV); + Push(tmp, xzr); + temps.Release(tmp); + + PrintfNoPreserve(format, arg0, arg1, arg2, arg3); + + // Restore NZCV. 
+ tmp = temps.AcquireX(); + Pop(xzr, tmp); + Msr(NZCV, tmp); + temps.Release(tmp); + } + + PopCPURegList(kCallerSavedV); + PopCPURegList(kCallerSaved); +} + +void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) { + VIXL_ASSERT(allow_macro_instructions_); + + if (generate_simulator_code_) { + // The arguments to the trace pseudo instruction need to be contiguous in + // memory, so make sure we don't try to emit a literal pool. + ExactAssemblyScope scope(this, kTraceLength); + + Label start; + bind(&start); + + // Refer to simulator-aarch64.h for a description of the marker and its + // arguments. + hlt(kTraceOpcode); + + VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kTraceParamsOffset); + dc32(parameters); + + VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kTraceCommandOffset); + dc32(command); + } else { + // Emit nothing on real hardware. + USE(parameters, command); + } +} + + +void MacroAssembler::Log(TraceParameters parameters) { + VIXL_ASSERT(allow_macro_instructions_); + + if (generate_simulator_code_) { + // The arguments to the log pseudo instruction need to be contiguous in + // memory, so make sure we don't try to emit a literal pool. + ExactAssemblyScope scope(this, kLogLength); + + Label start; + bind(&start); + + // Refer to simulator-aarch64.h for a description of the marker and its + // arguments. + hlt(kLogOpcode); + + VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kLogParamsOffset); + dc32(parameters); + } else { + // Emit nothing on real hardware. 
+ USE(parameters); + } +} + + +void MacroAssembler::EnableInstrumentation() { + VIXL_ASSERT(!isprint(InstrumentStateEnable)); + ExactAssemblyScope scope(this, kInstructionSize); + movn(xzr, InstrumentStateEnable); +} + + +void MacroAssembler::DisableInstrumentation() { + VIXL_ASSERT(!isprint(InstrumentStateDisable)); + ExactAssemblyScope scope(this, kInstructionSize); + movn(xzr, InstrumentStateDisable); +} + + +void MacroAssembler::AnnotateInstrumentation(const char* marker_name) { + VIXL_ASSERT(strlen(marker_name) == 2); + + // We allow only printable characters in the marker names. Unprintable + // characters are reserved for controlling features of the instrumentation. + VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1])); + + ExactAssemblyScope scope(this, kInstructionSize); + movn(xzr, (marker_name[1] << 8) | marker_name[0]); +} + + +void MacroAssembler::SetSimulatorCPUFeatures(const CPUFeatures& features) { + ConfigureSimulatorCPUFeaturesHelper(features, kSetCPUFeaturesOpcode); +} + + +void MacroAssembler::EnableSimulatorCPUFeatures(const CPUFeatures& features) { + ConfigureSimulatorCPUFeaturesHelper(features, kEnableCPUFeaturesOpcode); +} + + +void MacroAssembler::DisableSimulatorCPUFeatures(const CPUFeatures& features) { + ConfigureSimulatorCPUFeaturesHelper(features, kDisableCPUFeaturesOpcode); +} + + +void MacroAssembler::ConfigureSimulatorCPUFeaturesHelper( + const CPUFeatures& features, DebugHltOpcode action) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(generate_simulator_code_); + + typedef ConfigureCPUFeaturesElementType ElementType; + VIXL_ASSERT(CPUFeatures::kNumberOfFeatures <= + std::numeric_limits::max()); + + size_t count = features.Count(); + + size_t preamble_length = kConfigureCPUFeaturesListOffset; + size_t list_length = (count + 1) * sizeof(ElementType); + size_t padding_length = AlignUp(list_length, kInstructionSize) - list_length; + + size_t total_length = preamble_length + list_length + padding_length; + + // 
Check the overall code size as well as the size of each component. + ExactAssemblyScope guard_total(this, total_length); + + { // Preamble: the opcode itself. + ExactAssemblyScope guard_preamble(this, preamble_length); + hlt(action); + } + { // A kNone-terminated list of features. + ExactAssemblyScope guard_list(this, list_length); + for (CPUFeatures::const_iterator it = features.begin(); + it != features.end(); + ++it) { + dc(static_cast(*it)); + } + dc(static_cast(CPUFeatures::kNone)); + } + { // Padding for instruction alignment. + ExactAssemblyScope guard_padding(this, padding_length); + for (size_t size = 0; size < padding_length; size += sizeof(ElementType)) { + // The exact value is arbitrary. + dc(static_cast(CPUFeatures::kNone)); + } + } +} + +void MacroAssembler::SaveSimulatorCPUFeatures() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(generate_simulator_code_); + SingleEmissionCheckScope guard(this); + hlt(kSaveCPUFeaturesOpcode); +} + + +void MacroAssembler::RestoreSimulatorCPUFeatures() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(generate_simulator_code_); + SingleEmissionCheckScope guard(this); + hlt(kRestoreCPUFeaturesOpcode); +} + + +void UseScratchRegisterScope::Open(MacroAssembler* masm) { + VIXL_ASSERT(masm_ == NULL); + VIXL_ASSERT(masm != NULL); + masm_ = masm; + + CPURegList* available = masm->GetScratchRegisterList(); + CPURegList* available_fp = masm->GetScratchFPRegisterList(); + old_available_ = available->GetList(); + old_availablefp_ = available_fp->GetList(); + VIXL_ASSERT(available->GetType() == CPURegister::kRegister); + VIXL_ASSERT(available_fp->GetType() == CPURegister::kVRegister); + + parent_ = masm->GetCurrentScratchRegisterScope(); + masm->SetCurrentScratchRegisterScope(this); +} + + +void UseScratchRegisterScope::Close() { + if (masm_ != NULL) { + // Ensure that scopes nest perfectly, and do not outlive their parents. 
+ // This is a run-time check because the order of destruction of objects in + // the _same_ scope is implementation-defined, and is likely to change in + // optimised builds. + VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == this); + masm_->SetCurrentScratchRegisterScope(parent_); + + masm_->GetScratchRegisterList()->SetList(old_available_); + masm_->GetScratchFPRegisterList()->SetList(old_availablefp_); + + masm_ = NULL; + } +} + + +bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const { + return masm_->GetScratchRegisterList()->IncludesAliasOf(reg) || + masm_->GetScratchFPRegisterList()->IncludesAliasOf(reg); +} + + +Register UseScratchRegisterScope::AcquireRegisterOfSize(int size_in_bits) { + int code = AcquireNextAvailable(masm_->GetScratchRegisterList()).GetCode(); + return Register(code, size_in_bits); +} + + +FPRegister UseScratchRegisterScope::AcquireVRegisterOfSize(int size_in_bits) { + int code = AcquireNextAvailable(masm_->GetScratchFPRegisterList()).GetCode(); + return FPRegister(code, size_in_bits); +} + + +void UseScratchRegisterScope::Release(const CPURegister& reg) { + VIXL_ASSERT(masm_ != NULL); + if (reg.IsRegister()) { + ReleaseByCode(masm_->GetScratchRegisterList(), reg.GetCode()); + } else if (reg.IsVRegister()) { + ReleaseByCode(masm_->GetScratchFPRegisterList(), reg.GetCode()); + } else { + VIXL_ASSERT(reg.IsNone()); + } +} + + +void UseScratchRegisterScope::Include(const CPURegList& list) { + VIXL_ASSERT(masm_ != NULL); + if (list.GetType() == CPURegister::kRegister) { + // Make sure that neither sp nor xzr are included the list. 
+ IncludeByRegList(masm_->GetScratchRegisterList(), + list.GetList() & ~(xzr.GetBit() | sp.GetBit())); + } else { + VIXL_ASSERT(list.GetType() == CPURegister::kVRegister); + IncludeByRegList(masm_->GetScratchFPRegisterList(), list.GetList()); + } +} + + +void UseScratchRegisterScope::Include(const Register& reg1, + const Register& reg2, + const Register& reg3, + const Register& reg4) { + VIXL_ASSERT(masm_ != NULL); + RegList include = + reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit(); + // Make sure that neither sp nor xzr are included the list. + include &= ~(xzr.GetBit() | sp.GetBit()); + + IncludeByRegList(masm_->GetScratchRegisterList(), include); +} + + +void UseScratchRegisterScope::Include(const FPRegister& reg1, + const FPRegister& reg2, + const FPRegister& reg3, + const FPRegister& reg4) { + RegList include = + reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit(); + IncludeByRegList(masm_->GetScratchFPRegisterList(), include); +} + + +void UseScratchRegisterScope::Exclude(const CPURegList& list) { + if (list.GetType() == CPURegister::kRegister) { + ExcludeByRegList(masm_->GetScratchRegisterList(), list.GetList()); + } else { + VIXL_ASSERT(list.GetType() == CPURegister::kVRegister); + ExcludeByRegList(masm_->GetScratchFPRegisterList(), list.GetList()); + } +} + + +void UseScratchRegisterScope::Exclude(const Register& reg1, + const Register& reg2, + const Register& reg3, + const Register& reg4) { + RegList exclude = + reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit(); + ExcludeByRegList(masm_->GetScratchRegisterList(), exclude); +} + + +void UseScratchRegisterScope::Exclude(const FPRegister& reg1, + const FPRegister& reg2, + const FPRegister& reg3, + const FPRegister& reg4) { + RegList excludefp = + reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit(); + ExcludeByRegList(masm_->GetScratchFPRegisterList(), excludefp); +} + + +void UseScratchRegisterScope::Exclude(const CPURegister& reg1, + const CPURegister& reg2, + 
const CPURegister& reg3, + const CPURegister& reg4) { + RegList exclude = 0; + RegList excludefp = 0; + + const CPURegister regs[] = {reg1, reg2, reg3, reg4}; + + for (size_t i = 0; i < ArrayLength(regs); i++) { + if (regs[i].IsRegister()) { + exclude |= regs[i].GetBit(); + } else if (regs[i].IsFPRegister()) { + excludefp |= regs[i].GetBit(); + } else { + VIXL_ASSERT(regs[i].IsNone()); + } + } + + ExcludeByRegList(masm_->GetScratchRegisterList(), exclude); + ExcludeByRegList(masm_->GetScratchFPRegisterList(), excludefp); +} + + +void UseScratchRegisterScope::ExcludeAll() { + ExcludeByRegList(masm_->GetScratchRegisterList(), + masm_->GetScratchRegisterList()->GetList()); + ExcludeByRegList(masm_->GetScratchFPRegisterList(), + masm_->GetScratchFPRegisterList()->GetList()); +} + + +CPURegister UseScratchRegisterScope::AcquireNextAvailable( + CPURegList* available) { + VIXL_CHECK(!available->IsEmpty()); + CPURegister result = available->PopLowestIndex(); + VIXL_ASSERT(!AreAliased(result, xzr, sp)); + return result; +} + + +void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) { + ReleaseByRegList(available, static_cast(1) << code); +} + + +void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available, + RegList regs) { + available->SetList(available->GetList() | regs); +} + + +void UseScratchRegisterScope::IncludeByRegList(CPURegList* available, + RegList regs) { + available->SetList(available->GetList() | regs); +} + + +void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available, + RegList exclude) { + available->SetList(available->GetList() & ~exclude); +} + +} // namespace aarch64 +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.h new file mode 100644 index 00000000..bdd85494 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/macro-assembler-aarch64.h @@ -0,0 
+1,4050 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_ +#define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_ + +#include +#include + +#include "../code-generation-scopes-vixl.h" +#include "../globals-vixl.h" +#include "../macro-assembler-interface.h" + +#include "assembler-aarch64.h" +#include "instrument-aarch64.h" +// Required for runtime call support. +// TODO: Break this dependency. 
We should be able to separate out the necessary +// parts so that we don't need to include the whole simulator header. +#include "simulator-aarch64.h" +// Required in order to generate debugging instructions for the simulator. This +// is needed regardless of whether the simulator is included or not, since +// generating simulator specific instructions is controlled at runtime. +#include "simulator-constants-aarch64.h" + + +#define LS_MACRO_LIST(V) \ + V(Ldrb, Register&, rt, LDRB_w) \ + V(Strb, Register&, rt, STRB_w) \ + V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \ + V(Ldrh, Register&, rt, LDRH_w) \ + V(Strh, Register&, rt, STRH_w) \ + V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \ + V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \ + V(Str, CPURegister&, rt, StoreOpFor(rt)) \ + V(Ldrsw, Register&, rt, LDRSW_x) + + +#define LSPAIR_MACRO_LIST(V) \ + V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \ + V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \ + V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x) + +namespace vixl { +namespace aarch64 { + +// Forward declaration +class MacroAssembler; +class UseScratchRegisterScope; + +class Pool { + public: + explicit Pool(MacroAssembler* masm) + : checkpoint_(kNoCheckpointRequired), masm_(masm) { + Reset(); + } + + void Reset() { + checkpoint_ = kNoCheckpointRequired; + monitor_ = 0; + } + + void Block() { monitor_++; } + void Release(); + bool IsBlocked() const { return monitor_ != 0; } + + static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX; + + void SetNextCheckpoint(ptrdiff_t checkpoint); + ptrdiff_t GetCheckpoint() const { return checkpoint_; } + VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) { + return GetCheckpoint(); + } + + enum EmitOption { kBranchRequired, kNoBranchRequired }; + + protected: + // Next buffer offset at which a check is required for this pool. + ptrdiff_t checkpoint_; + // Indicates whether the emission of this pool is blocked. 
+ int monitor_; + // The MacroAssembler using this pool. + MacroAssembler* masm_; +}; + + +class LiteralPool : public Pool { + public: + explicit LiteralPool(MacroAssembler* masm); + ~LiteralPool(); + void Reset(); + + void AddEntry(RawLiteral* literal); + bool IsEmpty() const { return entries_.empty(); } + size_t GetSize() const; + VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); } + + size_t GetMaxSize() const; + VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); } + + size_t GetOtherPoolsMaxSize() const; + VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) { + return GetOtherPoolsMaxSize(); + } + + void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired); + // Check whether we need to emit the literal pool in order to be able to + // safely emit a branch with a given range. + void CheckEmitForBranch(size_t range); + void Emit(EmitOption option = kNoBranchRequired); + + void SetNextRecommendedCheckpoint(ptrdiff_t offset); + ptrdiff_t GetNextRecommendedCheckpoint(); + VIXL_DEPRECATED("GetNextRecommendedCheckpoint", + ptrdiff_t NextRecommendedCheckpoint()) { + return GetNextRecommendedCheckpoint(); + } + + void UpdateFirstUse(ptrdiff_t use_position); + + void DeleteOnDestruction(RawLiteral* literal) { + deleted_on_destruction_.push_back(literal); + } + + // Recommended not exact since the pool can be blocked for short periods. + static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes; + + private: + std::vector entries_; + size_t size_; + ptrdiff_t first_use_; + // The parent class `Pool` provides a `checkpoint_`, which is the buffer + // offset before which a check *must* occur. This recommended checkpoint + // indicates when we would like to start emitting the constant pool. The + // MacroAssembler can, but does not have to, check the buffer when the + // checkpoint is reached. 
+ ptrdiff_t recommended_checkpoint_; + + std::vector deleted_on_destruction_; +}; + + +inline size_t LiteralPool::GetSize() const { + // Account for the pool header. + return size_ + kInstructionSize; +} + + +inline size_t LiteralPool::GetMaxSize() const { + // Account for the potential branch over the pool. + return GetSize() + kInstructionSize; +} + + +inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() { + return first_use_ + kRecommendedLiteralPoolRange; +} + + +class VeneerPool : public Pool { + public: + explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {} + + void Reset(); + + void Block() { monitor_++; } + void Release(); + bool IsBlocked() const { return monitor_ != 0; } + bool IsEmpty() const { return unresolved_branches_.IsEmpty(); } + + class BranchInfo { + public: + BranchInfo() + : first_unreacheable_pc_(0), + pc_offset_(0), + label_(NULL), + branch_type_(UnknownBranchType) {} + BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type) + : pc_offset_(offset), label_(label), branch_type_(branch_type) { + first_unreacheable_pc_ = + pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_); + } + + static bool IsValidComparison(const BranchInfo& branch_1, + const BranchInfo& branch_2) { + // BranchInfo are always compared against against other objects with + // the same branch type. + if (branch_1.branch_type_ != branch_2.branch_type_) { + return false; + } + // Since we should never have two branch infos with the same offsets, it + // first looks like we should check that offsets are different. However + // the operators may also be used to *search* for a branch info in the + // set. + bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_); + return (!same_offsets || ((branch_1.label_ == branch_2.label_) && + (branch_1.first_unreacheable_pc_ == + branch_2.first_unreacheable_pc_))); + } + + // We must provide comparison operators to work with InvalSet. 
+ bool operator==(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ == other.pc_offset_; + } + bool operator<(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ < other.pc_offset_; + } + bool operator<=(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ <= other.pc_offset_; + } + bool operator>(const BranchInfo& other) const { + VIXL_ASSERT(IsValidComparison(*this, other)); + return pc_offset_ > other.pc_offset_; + } + + // First instruction position that is not reachable by the branch using a + // positive branch offset. + ptrdiff_t first_unreacheable_pc_; + // Offset of the branch in the code generation buffer. + ptrdiff_t pc_offset_; + // The label branched to. + Label* label_; + ImmBranchType branch_type_; + }; + + bool BranchTypeUsesVeneers(ImmBranchType type) { + return (type != UnknownBranchType) && (type != UncondBranchType); + } + + void RegisterUnresolvedBranch(ptrdiff_t branch_pos, + Label* label, + ImmBranchType branch_type); + void DeleteUnresolvedBranchInfoForLabel(Label* label); + + bool ShouldEmitVeneer(int64_t first_unreacheable_pc, size_t amount); + bool ShouldEmitVeneers(size_t amount) { + return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount); + } + + void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired); + void Emit(EmitOption option, size_t margin); + + // The code size generated for a veneer. Currently one branch instruction. + // This is for code size checking purposes, and can be extended in the future + // for example if we decide to add nops between the veneers. + static const int kVeneerCodeSize = 1 * kInstructionSize; + // The maximum size of code other than veneers that can be generated when + // emitting a veneer pool. Currently there can be an additional branch to jump + // over the pool. 
+ static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize; + + void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); } + + int GetNumberOfPotentialVeneers() const { + return static_cast(unresolved_branches_.GetSize()); + } + VIXL_DEPRECATED("GetNumberOfPotentialVeneers", + int NumberOfPotentialVeneers() const) { + return GetNumberOfPotentialVeneers(); + } + + size_t GetMaxSize() const { + return kPoolNonVeneerCodeSize + + unresolved_branches_.GetSize() * kVeneerCodeSize; + } + VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); } + + size_t GetOtherPoolsMaxSize() const; + VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) { + return GetOtherPoolsMaxSize(); + } + + static const int kNPreallocatedInfos = 4; + static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX; + static const size_t kReclaimFrom = 128; + static const size_t kReclaimFactor = 16; + + private: + typedef InvalSet + BranchInfoTypedSetBase; + typedef InvalSetIterator BranchInfoTypedSetIterBase; + + class BranchInfoTypedSet : public BranchInfoTypedSetBase { + public: + BranchInfoTypedSet() : BranchInfoTypedSetBase() {} + + ptrdiff_t GetFirstLimit() { + if (empty()) { + return kInvalidOffset; + } + return GetMinElementKey(); + } + VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) { + return GetFirstLimit(); + } + }; + + class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase { + public: + BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {} + explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set) + : BranchInfoTypedSetIterBase(typed_set) {} + + // TODO: Remove these and use the STL-like interface instead. 
+ using BranchInfoTypedSetIterBase::Advance; + using BranchInfoTypedSetIterBase::Current; + }; + + class BranchInfoSet { + public: + void insert(BranchInfo branch_info) { + ImmBranchType type = branch_info.branch_type_; + VIXL_ASSERT(IsValidBranchType(type)); + typed_set_[BranchIndexFromType(type)].insert(branch_info); + } + + void erase(BranchInfo branch_info) { + if (IsValidBranchType(branch_info.branch_type_)) { + int index = + BranchInfoSet::BranchIndexFromType(branch_info.branch_type_); + typed_set_[index].erase(branch_info); + } + } + + size_t GetSize() const { + size_t res = 0; + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + res += typed_set_[i].size(); + } + return res; + } + VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); } + + bool IsEmpty() const { + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + if (!typed_set_[i].empty()) { + return false; + } + } + return true; + } + VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); } + + ptrdiff_t GetFirstLimit() { + ptrdiff_t res = kInvalidOffset; + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + res = std::min(res, typed_set_[i].GetFirstLimit()); + } + return res; + } + VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) { + return GetFirstLimit(); + } + + void Reset() { + for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) { + typed_set_[i].clear(); + } + } + + static ImmBranchType BranchTypeFromIndex(int index) { + switch (index) { + case 0: + return CondBranchType; + case 1: + return CompareBranchType; + case 2: + return TestBranchType; + default: + VIXL_UNREACHABLE(); + return UnknownBranchType; + } + } + static int BranchIndexFromType(ImmBranchType branch_type) { + switch (branch_type) { + case CondBranchType: + return 0; + case CompareBranchType: + return 1; + case TestBranchType: + return 2; + default: + VIXL_UNREACHABLE(); + return 0; + } + } + + bool IsValidBranchType(ImmBranchType branch_type) { + return (branch_type != 
UnknownBranchType) && + (branch_type != UncondBranchType); + } + + private: + static const int kNumberOfTrackedBranchTypes = 3; + BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes]; + + friend class VeneerPool; + friend class BranchInfoSetIterator; + }; + + class BranchInfoSetIterator { + public: + explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + new (&sub_iterator_[i]) + BranchInfoTypedSetIterator(&(set_->typed_set_[i])); + } + } + + VeneerPool::BranchInfo* Current() { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + return sub_iterator_[i].Current(); + } + } + VIXL_UNREACHABLE(); + return NULL; + } + + void Advance() { + VIXL_ASSERT(!Done()); + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + sub_iterator_[i].Advance(); + return; + } + } + VIXL_UNREACHABLE(); + } + + bool Done() const { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) return false; + } + return true; + } + + void AdvanceToNextType() { + VIXL_ASSERT(!Done()); + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + sub_iterator_[i].Finish(); + return; + } + } + VIXL_UNREACHABLE(); + } + + void DeleteCurrentAndAdvance() { + for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) { + if (!sub_iterator_[i].Done()) { + sub_iterator_[i].DeleteCurrentAndAdvance(); + return; + } + } + } + + private: + BranchInfoSet* set_; + BranchInfoTypedSetIterator + sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes]; + }; + + ptrdiff_t GetNextCheckPoint() { + if (unresolved_branches_.IsEmpty()) { + return kNoCheckpointRequired; + } else { + return unresolved_branches_.GetFirstLimit(); + } + } + VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) { + return 
GetNextCheckPoint(); + } + + // Information about unresolved (forward) branches. + BranchInfoSet unresolved_branches_; +}; + + +// Helper for common Emission checks. +// The macro-instruction maps to a single instruction. +class SingleEmissionCheckScope : public EmissionCheckScope { + public: + explicit SingleEmissionCheckScope(MacroAssemblerInterface* masm) + : EmissionCheckScope(masm, kInstructionSize) {} +}; + + +// The macro instruction is a "typical" macro-instruction. Typical macro- +// instruction only emit a few instructions, a few being defined as 8 here. +class MacroEmissionCheckScope : public EmissionCheckScope { + public: + explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm) + : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {} + + private: + static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize; +}; + + +enum BranchType { + // Copies of architectural conditions. + // The associated conditions can be used in place of those, the code will + // take care of reinterpreting them with the correct type. + integer_eq = eq, + integer_ne = ne, + integer_hs = hs, + integer_lo = lo, + integer_mi = mi, + integer_pl = pl, + integer_vs = vs, + integer_vc = vc, + integer_hi = hi, + integer_ls = ls, + integer_ge = ge, + integer_lt = lt, + integer_gt = gt, + integer_le = le, + integer_al = al, + integer_nv = nv, + + // These two are *different* from the architectural codes al and nv. + // 'always' is used to generate unconditional branches. + // 'never' is used to not generate a branch (generally as the inverse + // branch type of 'always). + always, + never, + // cbz and cbnz + reg_zero, + reg_not_zero, + // tbz and tbnz + reg_bit_clear, + reg_bit_set, + + // Aliases. 
+ kBranchTypeFirstCondition = eq, + kBranchTypeLastCondition = nv, + kBranchTypeFirstUsingReg = reg_zero, + kBranchTypeFirstUsingBit = reg_bit_clear +}; + + +enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg }; + +// The macro assembler supports moving automatically pre-shifted immediates for +// arithmetic and logical instructions, and then applying a post shift in the +// instruction to undo the modification, in order to reduce the code emitted for +// an operation. For example: +// +// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1. +// +// This optimisation can be only partially applied when the stack pointer is an +// operand or destination, so this enumeration is used to control the shift. +enum PreShiftImmMode { + kNoShift, // Don't pre-shift. + kLimitShiftForSP, // Limit pre-shift for add/sub extend use. + kAnyShift // Allow any pre-shift. +}; + + +class MacroAssembler : public Assembler, public MacroAssemblerInterface { + public: + explicit MacroAssembler( + PositionIndependentCodeOption pic = PositionIndependentCode); + MacroAssembler(size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode); + MacroAssembler(byte* buffer, + size_t capacity, + PositionIndependentCodeOption pic = PositionIndependentCode); + ~MacroAssembler(); + + enum FinalizeOption { + kFallThrough, // There may be more code to execute after calling Finalize. + kUnreachable // Anything generated after calling Finalize is unreachable. + }; + + virtual vixl::internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE { + return this; + } + + // TODO(pools): implement these functions. 
+ virtual void EmitPoolHeader() VIXL_OVERRIDE {} + virtual void EmitPoolFooter() VIXL_OVERRIDE {} + virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { USE(n); } + virtual void EmitNopBytes(int n) VIXL_OVERRIDE { USE(n); } + + // Start generating code from the beginning of the buffer, discarding any code + // and data that has already been emitted into the buffer. + // + // In order to avoid any accidental transfer of state, Reset ASSERTs that the + // constant pool is not blocked. + void Reset(); + + // Finalize a code buffer of generated instructions. This function must be + // called before executing or copying code from the buffer. By default, + // anything generated after this should not be reachable (the last instruction + // generated is an unconditional branch). If you need to generate more code, + // then set `option` to kFallThrough. + void FinalizeCode(FinalizeOption option = kUnreachable); + + + // Constant generation helpers. + // These functions return the number of instructions required to move the + // immediate into the destination register. Also, if the masm pointer is + // non-null, it generates the code to do so. + // The two features are implemented using one function to avoid duplication of + // the logic. + // The function can be used to evaluate the cost of synthesizing an + // instruction using 'mov immediate' instructions. A user might prefer loading + // a constant using the literal pool instead of using multiple 'mov immediate' + // instructions. + static int MoveImmediateHelper(MacroAssembler* masm, + const Register& rd, + uint64_t imm); + static bool OneInstrMoveImmediateHelper(MacroAssembler* masm, + const Register& dst, + int64_t imm); + + + // Logical macros. 
+ void And(const Register& rd, const Register& rn, const Operand& operand); + void Ands(const Register& rd, const Register& rn, const Operand& operand); + void Bic(const Register& rd, const Register& rn, const Operand& operand); + void Bics(const Register& rd, const Register& rn, const Operand& operand); + void Orr(const Register& rd, const Register& rn, const Operand& operand); + void Orn(const Register& rd, const Register& rn, const Operand& operand); + void Eor(const Register& rd, const Register& rn, const Operand& operand); + void Eon(const Register& rd, const Register& rn, const Operand& operand); + void Tst(const Register& rn, const Operand& operand); + void LogicalMacro(const Register& rd, + const Register& rn, + const Operand& operand, + LogicalOp op); + + // Add and sub macros. + void Add(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S = LeaveFlags); + void Adds(const Register& rd, const Register& rn, const Operand& operand); + void Sub(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S = LeaveFlags); + void Subs(const Register& rd, const Register& rn, const Operand& operand); + void Cmn(const Register& rn, const Operand& operand); + void Cmp(const Register& rn, const Operand& operand); + void Neg(const Register& rd, const Operand& operand); + void Negs(const Register& rd, const Operand& operand); + + void AddSubMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubOp op); + + // Add/sub with carry macros. 
+ void Adc(const Register& rd, const Register& rn, const Operand& operand); + void Adcs(const Register& rd, const Register& rn, const Operand& operand); + void Sbc(const Register& rd, const Register& rn, const Operand& operand); + void Sbcs(const Register& rd, const Register& rn, const Operand& operand); + void Ngc(const Register& rd, const Operand& operand); + void Ngcs(const Register& rd, const Operand& operand); + void AddSubWithCarryMacro(const Register& rd, + const Register& rn, + const Operand& operand, + FlagsUpdate S, + AddSubWithCarryOp op); + + void Rmif(const Register& xn, unsigned shift, StatusFlags flags); + void Setf8(const Register& wn); + void Setf16(const Register& wn); + + // Move macros. + void Mov(const Register& rd, uint64_t imm); + void Mov(const Register& rd, + const Operand& operand, + DiscardMoveMode discard_mode = kDontDiscardForSameWReg); + void Mvn(const Register& rd, uint64_t imm) { + Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask)); + } + void Mvn(const Register& rd, const Operand& operand); + + // Try to move an immediate into the destination register in a single + // instruction. Returns true for success, and updates the contents of dst. + // Returns false, otherwise. + bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm); + + // Move an immediate into register dst, and return an Operand object for + // use with a subsequent instruction that accepts a shift. The value moved + // into dst is not necessarily equal to imm; it may have had a shifting + // operation applied to it that will be subsequently undone by the shift + // applied in the Operand. + Operand MoveImmediateForShiftedOp(const Register& dst, + int64_t imm, + PreShiftImmMode mode); + + void Move(const GenericOperand& dst, const GenericOperand& src); + + // Synthesises the address represented by a MemOperand into a register. + void ComputeAddress(const Register& dst, const MemOperand& mem_op); + + // Conditional macros. 
+ void Ccmp(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + void Ccmn(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond); + void ConditionalCompareMacro(const Register& rn, + const Operand& operand, + StatusFlags nzcv, + Condition cond, + ConditionalCompareOp op); + + // On return, the boolean values pointed to will indicate whether `left` and + // `right` should be synthesised in a temporary register. + static void GetCselSynthesisInformation(const Register& rd, + const Operand& left, + const Operand& right, + bool* should_synthesise_left, + bool* should_synthesise_right) { + // Note that the helper does not need to look at the condition. + CselHelper(NULL, + rd, + left, + right, + eq, + should_synthesise_left, + should_synthesise_right); + } + + void Csel(const Register& rd, + const Operand& left, + const Operand& right, + Condition cond) { + CselHelper(this, rd, left, right, cond); + } + +// Load/store macros. +#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \ + void FN(const REGTYPE REG, const MemOperand& addr); + LS_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + void LoadStoreMacro(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op); + +#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ + void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr); + LSPAIR_MACRO_LIST(DECLARE_FUNCTION) +#undef DECLARE_FUNCTION + + void LoadStorePairMacro(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& addr, + LoadStorePairOp op); + + void Prfm(PrefetchOperation op, const MemOperand& addr); + + // Push or pop up to 4 registers of the same width to or from the stack, + // using the current stack pointer as set by SetStackPointer. + // + // If an argument register is 'NoReg', all further arguments are also assumed + // to be 'NoReg', and are thus not pushed or popped. 
+ // + // Arguments are ordered such that "Push(a, b);" is functionally equivalent + // to "Push(a); Push(b);". + // + // It is valid to push the same register more than once, and there is no + // restriction on the order in which registers are specified. + // + // It is not valid to pop into the same register more than once in one + // operation, not even into the zero register. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then it + // must be aligned to 16 bytes on entry and the total size of the specified + // registers must also be a multiple of 16 bytes. + // + // Even if the current stack pointer is not the system stack pointer (sp), + // Push (and derived methods) will still modify the system stack pointer in + // order to comply with ABI rules about accessing memory below the system + // stack pointer. + // + // Other than the registers passed into Pop, the stack pointer and (possibly) + // the system stack pointer, these methods do not modify any other registers. + void Push(const CPURegister& src0, + const CPURegister& src1 = NoReg, + const CPURegister& src2 = NoReg, + const CPURegister& src3 = NoReg); + void Pop(const CPURegister& dst0, + const CPURegister& dst1 = NoReg, + const CPURegister& dst2 = NoReg, + const CPURegister& dst3 = NoReg); + + // Alternative forms of Push and Pop, taking a RegList or CPURegList that + // specifies the registers that are to be pushed or popped. Higher-numbered + // registers are associated with higher memory addresses (as in the A32 push + // and pop instructions). + // + // (Push|Pop)SizeRegList allow you to specify the register size as a + // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are + // supported. + // + // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred. 
+ void PushCPURegList(CPURegList registers); + void PopCPURegList(CPURegList registers); + + void PushSizeRegList( + RegList registers, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PushCPURegList(CPURegList(type, reg_size, registers)); + } + void PopSizeRegList(RegList registers, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PopCPURegList(CPURegList(type, reg_size, registers)); + } + void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); } + void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); } + void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); } + void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); } + void PushDRegList(RegList regs) { + PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister); + } + void PopDRegList(RegList regs) { + PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister); + } + void PushSRegList(RegList regs) { + PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister); + } + void PopSRegList(RegList regs) { + PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister); + } + + // Push the specified register 'count' times. + void PushMultipleTimes(int count, Register src); + + // Poke 'src' onto the stack. The offset is in bytes. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then sp + // must be aligned to 16 bytes. + void Poke(const Register& src, const Operand& offset); + + // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes. + // + // If the current stack pointer (as set by SetStackPointer) is sp, then sp + // must be aligned to 16 bytes. + void Peek(const Register& dst, const Operand& offset); + + // Alternative forms of Peek and Poke, taking a RegList or CPURegList that + // specifies the registers that are to be pushed or popped. Higher-numbered + // registers are associated with higher memory addresses. 
+ // + // (Peek|Poke)SizeRegList allow you to specify the register size as a + // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are + // supported. + // + // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred. + void PeekCPURegList(CPURegList registers, int64_t offset) { + LoadCPURegList(registers, MemOperand(StackPointer(), offset)); + } + void PokeCPURegList(CPURegList registers, int64_t offset) { + StoreCPURegList(registers, MemOperand(StackPointer(), offset)); + } + + void PeekSizeRegList( + RegList registers, + int64_t offset, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PeekCPURegList(CPURegList(type, reg_size, registers), offset); + } + void PokeSizeRegList( + RegList registers, + int64_t offset, + unsigned reg_size, + CPURegister::RegisterType type = CPURegister::kRegister) { + PokeCPURegList(CPURegList(type, reg_size, registers), offset); + } + void PeekXRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kXRegSize); + } + void PokeXRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kXRegSize); + } + void PeekWRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kWRegSize); + } + void PokeWRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kWRegSize); + } + void PeekDRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); + } + void PokeDRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister); + } + void PeekSRegList(RegList regs, int64_t offset) { + PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); + } + void PokeSRegList(RegList regs, int64_t offset) { + PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister); + } + + + // Claim or drop stack space without actually accessing memory. 
+ // + // If the current stack pointer (as set by SetStackPointer) is sp, then it + // must be aligned to 16 bytes and the size claimed or dropped must be a + // multiple of 16 bytes. + void Claim(const Operand& size); + void Drop(const Operand& size); + + // Preserve the callee-saved registers (as defined by AAPCS64). + // + // Higher-numbered registers are pushed before lower-numbered registers, and + // thus get higher addresses. + // Floating-point registers are pushed before general-purpose registers, and + // thus get higher addresses. + // + // This method must not be called unless StackPointer() is sp, and it is + // aligned to 16 bytes. + void PushCalleeSavedRegisters(); + + // Restore the callee-saved registers (as defined by AAPCS64). + // + // Higher-numbered registers are popped after lower-numbered registers, and + // thus come from higher addresses. + // Floating-point registers are popped after general-purpose registers, and + // thus come from higher addresses. + // + // This method must not be called unless StackPointer() is sp, and it is + // aligned to 16 bytes. + void PopCalleeSavedRegisters(); + + void LoadCPURegList(CPURegList registers, const MemOperand& src); + void StoreCPURegList(CPURegList registers, const MemOperand& dst); + + // Remaining instructions are simple pass-through calls to the assembler. 
+ void Adr(const Register& rd, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + adr(rd, label); + } + void Adrp(const Register& rd, Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + adrp(rd, label); + } + void Asr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + asr(rd, rn, shift); + } + void Asr(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + asrv(rd, rn, rm); + } + + // Branch type inversion relies on these relations. + VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) && + (reg_bit_clear == (reg_bit_set ^ 1)) && + (always == (never ^ 1))); + + BranchType InvertBranchType(BranchType type) { + if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { + return static_cast( + InvertCondition(static_cast(type))); + } else { + return static_cast(type ^ 1); + } + } + + void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1); + + void B(Label* label); + void B(Label* label, Condition cond); + void B(Condition cond, Label* label) { B(label, cond); } + void Bfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfm(rd, rn, immr, imms); + } + void Bfi(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfi(rd, rn, lsb, 
width); + } + void Bfc(const Register& rd, unsigned lsb, unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + bfc(rd, lsb, width); + } + void Bfxil(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + bfxil(rd, rn, lsb, width); + } + void Bind(Label* label, BranchTargetIdentifier id = EmitBTI_none); + // Bind a label to a specified offset from the start of the buffer. + void BindToOffset(Label* label, ptrdiff_t offset); + void Bl(Label* label) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bl(label); + } + void Blr(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + blr(xn); + } + void Br(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + br(xn); + } + void Braaz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + braaz(xn); + } + void Brabz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brabz(xn); + } + void Blraaz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blraaz(xn); + } + void Blrabz(const Register& xn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blrabz(xn); + } + void Retaa() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + retaa(); + } + void Retab() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + retab(); + } + void Braa(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + 
SingleEmissionCheckScope guard(this); + braa(xn, xm); + } + void Brab(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brab(xn, xm); + } + void Blraa(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blraa(xn, xm); + } + void Blrab(const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + blrab(xn, xm); + } + void Brk(int code = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + brk(code); + } + void Cbnz(const Register& rt, Label* label); + void Cbz(const Register& rt, Label* label); + void Cinc(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cinc(rd, rn, cond); + } + void Cinv(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cinv(rd, rn, cond); + } + +#define PAUTH_SYSTEM_MODES(V) \ + V(az) \ + V(bz) \ + V(asp) \ + V(bsp) + +#define DEFINE_MACRO_ASM_FUNCS(SUFFIX) \ + void Paci##SUFFIX() { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + paci##SUFFIX(); \ + } \ + void Auti##SUFFIX() { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + auti##SUFFIX(); \ + } + + PAUTH_SYSTEM_MODES(DEFINE_MACRO_ASM_FUNCS) +#undef DEFINE_MACRO_ASM_FUNCS + + // The 1716 pac and aut instructions encourage people to use x16 and x17 + // directly, perhaps without realising that this is forbidden. 
For example: + // + // UseScratchRegisterScope temps(&masm); + // Register temp = temps.AcquireX(); // temp will be x16 + // __ Mov(x17, ptr); + // __ Mov(x16, modifier); // Will override temp! + // __ Pacia1716(); + // + // To work around this issue, you must exclude x16 and x17 from the scratch + // register list. You may need to replace them with other registers: + // + // UseScratchRegisterScope temps(&masm); + // temps.Exclude(x16, x17); + // temps.Include(x10, x11); + // __ Mov(x17, ptr); + // __ Mov(x16, modifier); + // __ Pacia1716(); + void Pacia1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + pacia1716(); + } + void Pacib1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + pacib1716(); + } + void Autia1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + autia1716(); + } + void Autib1716() { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16)); + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17)); + SingleEmissionCheckScope guard(this); + autib1716(); + } + void Xpaclri() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xpaclri(); + } + void Clrex() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + clrex(); + } + void Cls(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cls(rd, rn); + } + void 
Clz(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + clz(rd, rn); + } + void Cneg(const Register& rd, const Register& rn, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + cneg(rd, rn, cond); + } + void Esb() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + esb(); + } + void Csdb() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + csdb(); + } + void Cset(const Register& rd, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + cset(rd, cond); + } + void Csetm(const Register& rd, Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + csetm(rd, cond); + } + void Csinc(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csinc(rd, rn, rm, cond); + } + void Csinv(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + csinv(rd, rn, rm, cond); + } + void Csneg(const Register& rd, + const Register& rn, + const Register& rm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT((cond != al) && (cond != nv)); + 
SingleEmissionCheckScope guard(this); + csneg(rd, rn, rm, cond); + } + void Dmb(BarrierDomain domain, BarrierType type) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dmb(domain, type); + } + void Dsb(BarrierDomain domain, BarrierType type) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dsb(domain, type); + } + void Extr(const Register& rd, + const Register& rn, + const Register& rm, + unsigned lsb) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + extr(rd, rn, rm, lsb); + } + void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fadd(vd, vn, vm); + } + void Fccmp(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond, + FPTrapFlags trap = DisableTrap) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + FPCCompareMacro(vn, vm, nzcv, cond, trap); + } + void Fccmpe(const VRegister& vn, + const VRegister& vm, + StatusFlags nzcv, + Condition cond) { + Fccmp(vn, vm, nzcv, cond, EnableTrap); + } + void Fcmp(const VRegister& vn, + const VRegister& vm, + FPTrapFlags trap = DisableTrap) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + FPCompareMacro(vn, vm, trap); + } + void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap); + void Fcmpe(const VRegister& vn, double value); + void Fcmpe(const VRegister& vn, const VRegister& vm) { + Fcmp(vn, vm, EnableTrap); + } + void Fcsel(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + Condition cond) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT((cond != al) && (cond != nv)); + SingleEmissionCheckScope guard(this); + fcsel(vd, vn, vm, cond); + } 
+ void Fcvt(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvt(vd, vn); + } + void Fcvtl(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtl(vd, vn); + } + void Fcvtl2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtl2(vd, vn); + } + void Fcvtn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtn(vd, vn); + } + void Fcvtn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtn2(vd, vn); + } + void Fcvtxn(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtxn(vd, vn); + } + void Fcvtxn2(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtxn2(vd, vn); + } + void Fcvtas(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtas(rd, vn); + } + void Fcvtau(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtau(rd, vn); + } + void Fcvtms(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtms(rd, vn); + } + void Fcvtmu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtmu(rd, vn); + } + void Fcvtns(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + 
VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtns(rd, vn); + } + void Fcvtnu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtnu(rd, vn); + } + void Fcvtps(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtps(rd, vn); + } + void Fcvtpu(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtpu(rd, vn); + } + void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtzs(rd, vn, fbits); + } + void Fjcvtzs(const Register& rd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fjcvtzs(rd, vn); + } + void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fcvtzu(rd, vn, fbits); + } + void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fdiv(vd, vn, vm); + } + void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmax(vd, vn, vm); + } + void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmaxnm(vd, vn, vm); + } + void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmin(vd, 
vn, vm); + } + void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fminnm(vd, vn, vm); + } + void Fmov(const VRegister& vd, const VRegister& vn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + // Only emit an instruction if vd and vn are different, and they are both D + // registers. fmov(s0, s0) is not a no-op because it clears the top word of + // d0. Technically, fmov(d0, d0) is not a no-op either because it clears + // the top of q0, but VRegister does not currently support Q registers. + if (!vd.Is(vn) || !vd.Is64Bits()) { + fmov(vd, vn); + } + } + void Fmov(const VRegister& vd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + fmov(vd, rn); + } + void Fmov(const VRegister& vd, const XRegister& xn) { + Fmov(vd, Register(xn)); + } + void Fmov(const VRegister& vd, const WRegister& wn) { + Fmov(vd, Register(wn)); + } + void Fmov(const VRegister& vd, int index, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmov(vd, index, rn); + } + void Fmov(const Register& rd, const VRegister& vn, int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmov(rd, vn, index); + } + + // Provide explicit double and float interfaces for FP immediate moves, rather + // than relying on implicit C++ casts. This allows signalling NaNs to be + // preserved when the immediate matches the format of vd. Most systems convert + // signalling NaNs to quiet NaNs when converting between float and double. + void Fmov(VRegister vd, double imm); + void Fmov(VRegister vd, float imm); + void Fmov(VRegister vd, const Float16 imm); + // Provide a template to allow other types to be converted automatically. 
+ template + void Fmov(VRegister vd, T imm) { + VIXL_ASSERT(allow_macro_instructions_); + Fmov(vd, static_cast(imm)); + } + void Fmov(Register rd, VRegister vn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + fmov(rd, vn); + } + void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmul(vd, vn, vm); + } + void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fnmul(vd, vn, vm); + } + void Fmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmadd(vd, vn, vm, va); + } + void Fmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fmsub(vd, vn, vm, va); + } + void Fnmadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fnmadd(vd, vn, vm, va); + } + void Fnmsub(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + const VRegister& va) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fnmsub(vd, vn, vm, va); + } + void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fsub(vd, vn, vm); + } + void Hint(SystemHint code) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + hint(code); + } + void Hint(int imm7) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + hint(imm7); + } + void Hlt(int code) { + 
VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + hlt(code); + } + void Isb() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + isb(); + } + void Ldar(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldar(rt, src); + } + void Ldarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldarb(rt, src); + } + void Ldarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldarh(rt, src); + } + void Ldlar(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldlar(rt, src); + } + void Ldlarb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldlarb(rt, src); + } + void Ldlarh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldlarh(rt, src); + } + void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + ldaxp(rt, rt2, src); + } + void Ldaxr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaxr(rt, src); + } + void Ldaxrb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaxrb(rt, src); + } + void Ldaxrh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldaxrh(rt, src); + } + +// clang-format off +#define COMPARE_AND_SWAP_SINGLE_MACRO_LIST(V) \ + V(cas, Cas) \ + V(casa, Casa) \ + 
V(casl, Casl) \ + V(casal, Casal) \ + V(casb, Casb) \ + V(casab, Casab) \ + V(caslb, Caslb) \ + V(casalb, Casalb) \ + V(cash, Cash) \ + V(casah, Casah) \ + V(caslh, Caslh) \ + V(casalh, Casalh) +// clang-format on + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rt, src); \ + } + COMPARE_AND_SWAP_SINGLE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + + +// clang-format off +#define COMPARE_AND_SWAP_PAIR_MACRO_LIST(V) \ + V(casp, Casp) \ + V(caspa, Caspa) \ + V(caspl, Caspl) \ + V(caspal, Caspal) +// clang-format on + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const Register& rs, \ + const Register& rs2, \ + const Register& rt, \ + const Register& rt2, \ + const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rs2, rt, rt2, src); \ + } + COMPARE_AND_SWAP_PAIR_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// These macros generate all the variations of the atomic memory operations, +// e.g. ldadd, ldadda, ldaddb, staddl, etc. 
+ +// clang-format off +#define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \ + V(DEF, MASM_PRE##add, ASM_PRE##add) \ + V(DEF, MASM_PRE##clr, ASM_PRE##clr) \ + V(DEF, MASM_PRE##eor, ASM_PRE##eor) \ + V(DEF, MASM_PRE##set, ASM_PRE##set) \ + V(DEF, MASM_PRE##smax, ASM_PRE##smax) \ + V(DEF, MASM_PRE##smin, ASM_PRE##smin) \ + V(DEF, MASM_PRE##umax, ASM_PRE##umax) \ + V(DEF, MASM_PRE##umin, ASM_PRE##umin) + +#define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \ + V(MASM, ASM) \ + V(MASM##l, ASM##l) \ + V(MASM##b, ASM##b) \ + V(MASM##lb, ASM##lb) \ + V(MASM##h, ASM##h) \ + V(MASM##lh, ASM##lh) + +#define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \ + ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \ + V(MASM##a, ASM##a) \ + V(MASM##al, ASM##al) \ + V(MASM##ab, ASM##ab) \ + V(MASM##alb, ASM##alb) \ + V(MASM##ah, ASM##ah) \ + V(MASM##alh, ASM##alh) +// clang-format on + +#define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM) \ + void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rt, src); \ + } +#define DEFINE_MACRO_STORE_ASM_FUNC(MASM, ASM) \ + void MASM(const Register& rs, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, src); \ + } + + ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES, + DEFINE_MACRO_LOAD_ASM_FUNC, + Ld, + ld) + ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES, + DEFINE_MACRO_STORE_ASM_FUNC, + St, + st) + +#define DEFINE_MACRO_SWP_ASM_FUNC(MASM, ASM) \ + void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(rs, rt, src); \ + } + + ATOMIC_MEMORY_LOAD_MACRO_MODES(DEFINE_MACRO_SWP_ASM_FUNC, Swp, swp) + +#undef DEFINE_MACRO_LOAD_ASM_FUNC +#undef DEFINE_MACRO_STORE_ASM_FUNC +#undef DEFINE_MACRO_SWP_ASM_FUNC + + void 
Ldaprb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + VIXL_ASSERT(src.IsImmediateOffset()); + if (src.GetOffset() == 0) { + ldaprb(rt, src); + } else { + ldapurb(rt, src); + } + } + + void Ldapursb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldapursb(rt, src); + } + + void Ldaprh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + VIXL_ASSERT(src.IsImmediateOffset()); + if (src.GetOffset() == 0) { + ldaprh(rt, src); + } else { + ldapurh(rt, src); + } + } + + void Ldapursh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldapursh(rt, src); + } + + void Ldapr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + VIXL_ASSERT(src.IsImmediateOffset()); + if (src.GetOffset() == 0) { + ldapr(rt, src); + } else { + ldapur(rt, src); + } + } + + void Ldapursw(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldapursw(rt, src); + } + + void Ldnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldnp(rt, rt2, src); + } + // Provide both double and float interfaces for FP immediate loads, rather + // than relying on implicit C++ casts. This allows signalling NaNs to be + // preserved when the immediate matches the format of fd. Most systems convert + // signalling NaNs to quiet NaNs when converting between float and double. 
+ void Ldr(const VRegister& vt, double imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + RawLiteral* literal; + if (vt.IsD()) { + literal = new Literal(imm, + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool); + } else { + literal = new Literal(static_cast(imm), + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool); + } + ldr(vt, literal); + } + void Ldr(const VRegister& vt, float imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + RawLiteral* literal; + if (vt.IsS()) { + literal = new Literal(imm, + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool); + } else { + literal = new Literal(static_cast(imm), + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool); + } + ldr(vt, literal); + } + void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(vt.IsQ()); + SingleEmissionCheckScope guard(this); + ldr(vt, + new Literal(high64, + low64, + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool)); + } + void Ldr(const Register& rt, uint64_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + RawLiteral* literal; + if (rt.Is64Bits()) { + literal = new Literal(imm, + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool); + } else { + VIXL_ASSERT(rt.Is32Bits()); + VIXL_ASSERT(IsUint32(imm) || IsInt32(imm)); + literal = new Literal(static_cast(imm), + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool); + } + ldr(rt, literal); + } + void Ldrsw(const Register& rt, uint32_t imm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + ldrsw(rt, + new Literal(imm, + &literal_pool_, + RawLiteral::kDeletedOnPlacementByPool)); + } + void Ldr(const CPURegister& rt, RawLiteral* literal) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldr(rt, 
literal); + } + void Ldrsw(const Register& rt, RawLiteral* literal) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldrsw(rt, literal); + } + void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + ldxp(rt, rt2, src); + } + void Ldxr(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldxr(rt, src); + } + void Ldxrb(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldxrb(rt, src); + } + void Ldxrh(const Register& rt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldxrh(rt, src); + } + void Lsl(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + lsl(rd, rn, shift); + } + void Lsl(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + lslv(rd, rn, rm); + } + void Lsr(const Register& rd, const Register& rn, unsigned shift) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + lsr(rd, rn, shift); + } + void Lsr(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + lsrv(rd, rn, rm); + } + void Ldraa(const Register& xt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + 
SingleEmissionCheckScope guard(this); + ldraa(xt, src); + } + void Ldrab(const Register& xt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ldrab(xt, src); + } + void Madd(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + madd(rd, rn, rm, ra); + } + void Mneg(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + mneg(rd, rn, rm); + } + void Mov(const Register& rd, + const Register& rn, + DiscardMoveMode discard_mode = kDontDiscardForSameWReg) { + VIXL_ASSERT(allow_macro_instructions_); + // Emit a register move only if the registers are distinct, or if they are + // not X registers. + // + // Note that mov(w0, w0) is not a no-op because it clears the top word of + // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W + // registers is not required to clear the top word of the X register. In + // this case, the instruction is discarded. + // + // If the sp is an operand, add #0 is emitted, otherwise, orr #0. 
+ if (!rd.Is(rn) || + (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) { + SingleEmissionCheckScope guard(this); + mov(rd, rn); + } + } + void Movk(const Register& rd, uint64_t imm, int shift = -1) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + SingleEmissionCheckScope guard(this); + movk(rd, imm, shift); + } + void Mrs(const Register& rt, SystemRegister sysreg) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + mrs(rt, sysreg); + } + void Msr(SystemRegister sysreg, const Register& rt) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rt.IsZero()); + SingleEmissionCheckScope guard(this); + msr(sysreg, rt); + } + void Cfinv() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cfinv(); + } + void Axflag() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + axflag(); + } + void Xaflag() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xaflag(); + } + void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + sys(op1, crn, crm, op2, rt); + } + void Dc(DataCacheOp op, const Register& rt) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dc(op, rt); + } + void Ic(InstructionCacheOp op, const Register& rt) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ic(op, rt); + } + void Msub(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + msub(rd, rn, rm, ra); + } + void Mul(const Register& rd, const Register& rn, const Register& rm) { + 
VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + mul(rd, rn, rm); + } + void Nop() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + nop(); + } + void Rbit(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rbit(rd, rn); + } + void Ret(const Register& xn = lr) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xn.IsZero()); + SingleEmissionCheckScope guard(this); + ret(xn); + } + void Rev(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev(rd, rn); + } + void Rev16(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev16(rd, rn); + } + void Rev32(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev32(rd, rn); + } + void Rev64(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + rev64(rd, rn); + } + +#define PAUTH_MASM_VARIATIONS(V) \ + V(Paci, paci) \ + V(Pacd, pacd) \ + V(Auti, auti) \ + V(Autd, autd) + +#define DEFINE_MACRO_ASM_FUNCS(MASM_PRE, ASM_PRE) \ + void MASM_PRE##a(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM_PRE##a(xd, xn); \ + } \ + void MASM_PRE##za(const Register& xd) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope 
guard(this); \ + ASM_PRE##za(xd); \ + } \ + void MASM_PRE##b(const Register& xd, const Register& xn) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM_PRE##b(xd, xn); \ + } \ + void MASM_PRE##zb(const Register& xd) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM_PRE##zb(xd); \ + } + + PAUTH_MASM_VARIATIONS(DEFINE_MACRO_ASM_FUNCS) +#undef DEFINE_MACRO_ASM_FUNCS + + void Pacga(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + pacga(xd, xn, xm); + } + + void Xpaci(const Register& xd) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xpaci(xd); + } + + void Xpacd(const Register& xd) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + xpacd(xd); + } + void Ror(const Register& rd, const Register& rs, unsigned shift) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rs.IsZero()); + SingleEmissionCheckScope guard(this); + ror(rd, rs, shift); + } + void Ror(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + rorv(rd, rn, rm); + } + void Sbfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfiz(rd, rn, lsb, width); + } + void Sbfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfm(rd, rn, immr, imms); + } + void Sbfx(const Register& rd, + const Register& rn, + 
unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sbfx(rd, rn, lsb, width); + } + void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + scvtf(vd, rn, fbits); + } + void Sdiv(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + sdiv(rd, rn, rm); + } + void Smaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + smaddl(rd, rn, rm, ra); + } + void Smsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + smsubl(rd, rn, rm, ra); + } + void Smull(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + smull(rd, rn, rm); + } + void Smulh(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xd.IsZero()); + VIXL_ASSERT(!xn.IsZero()); + VIXL_ASSERT(!xm.IsZero()); + SingleEmissionCheckScope guard(this); + smulh(xd, xn, xm); + } + void Stlr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + 
SingleEmissionCheckScope guard(this); + VIXL_ASSERT(dst.IsImmediateOffset()); + if (dst.GetOffset() == 0) { + stlr(rt, dst); + } else { + stlur(rt, dst); + } + } + void Stlrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + VIXL_ASSERT(dst.IsImmediateOffset()); + if (dst.GetOffset() == 0) { + stlrb(rt, dst); + } else { + stlurb(rt, dst); + } + } + void Stlrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + VIXL_ASSERT(dst.IsImmediateOffset()); + if (dst.GetOffset() == 0) { + stlrh(rt, dst); + } else { + stlurh(rt, dst); + } + } + void Stllr(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stllr(rt, dst); + } + void Stllrb(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stllrb(rt, dst); + } + void Stllrh(const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stllrh(rt, dst); + } + void Stlxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + VIXL_ASSERT(!rs.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + stlxp(rs, rt, rt2, dst); + } + void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxr(rs, rt, dst); + } + void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + 
SingleEmissionCheckScope guard(this); + stlxrb(rs, rt, dst); + } + void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stlxrh(rs, rt, dst); + } + void Stnp(const CPURegister& rt, + const CPURegister& rt2, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + stnp(rt, rt2, dst); + } + void Stxp(const Register& rs, + const Register& rt, + const Register& rt2, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + VIXL_ASSERT(!rs.Aliases(rt2)); + SingleEmissionCheckScope guard(this); + stxp(rs, rt, rt2, dst); + } + void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxr(rs, rt, dst); + } + void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxrb(rs, rt, dst); + } + void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister())); + VIXL_ASSERT(!rs.Aliases(rt)); + SingleEmissionCheckScope guard(this); + stxrh(rs, rt, dst); + } + void Svc(int code) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + svc(code); + } + void Sxtb(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxtb(rd, 
rn); + } + void Sxth(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxth(rd, rn); + } + void Sxtw(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + sxtw(rd, rn); + } + void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vn3, vm); + } + void Tbl(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbl(vd, vn, vn2, vn3, vn4, vm); + } + void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vm); + } + void Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vn3, vm); + } + void 
Tbx(const VRegister& vd, + const VRegister& vn, + const VRegister& vn2, + const VRegister& vn3, + const VRegister& vn4, + const VRegister& vm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + tbx(vd, vn, vn2, vn3, vn4, vm); + } + void Tbnz(const Register& rt, unsigned bit_pos, Label* label); + void Tbz(const Register& rt, unsigned bit_pos, Label* label); + void Ubfiz(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfiz(rd, rn, lsb, width); + } + void Ubfm(const Register& rd, + const Register& rn, + unsigned immr, + unsigned imms) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfm(rd, rn, immr, imms); + } + void Ubfx(const Register& rd, + const Register& rn, + unsigned lsb, + unsigned width) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ubfx(rd, rn, lsb, width); + } + void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + ucvtf(vd, rn, fbits); + } + void Udiv(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + udiv(rd, rn, rm); + } + void Umaddl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + umaddl(rd, rn, rm, ra); + } + void 
Umull(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + SingleEmissionCheckScope guard(this); + umull(rd, rn, rm); + } + void Umulh(const Register& xd, const Register& xn, const Register& xm) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!xd.IsZero()); + VIXL_ASSERT(!xn.IsZero()); + VIXL_ASSERT(!xm.IsZero()); + SingleEmissionCheckScope guard(this); + umulh(xd, xn, xm); + } + void Umsubl(const Register& rd, + const Register& rn, + const Register& rm, + const Register& ra) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + VIXL_ASSERT(!rm.IsZero()); + VIXL_ASSERT(!ra.IsZero()); + SingleEmissionCheckScope guard(this); + umsubl(rd, rn, rm, ra); + } + void Unreachable() { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + if (generate_simulator_code_) { + hlt(kUnreachableOpcode); + } else { + // Branch to 0 to generate a segfault. + // lr - kInstructionSize is the address of the offending instruction. + blr(xzr); + } + } + void Uxtb(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxtb(rd, rn); + } + void Uxth(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxth(rd, rn); + } + void Uxtw(const Register& rd, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + VIXL_ASSERT(!rd.IsZero()); + VIXL_ASSERT(!rn.IsZero()); + SingleEmissionCheckScope guard(this); + uxtw(rd, rn); + } + +// NEON 3 vector register instructions. 
+#define NEON_3VREG_MACRO_LIST(V) \ + V(add, Add) \ + V(addhn, Addhn) \ + V(addhn2, Addhn2) \ + V(addp, Addp) \ + V(and_, And) \ + V(bic, Bic) \ + V(bif, Bif) \ + V(bit, Bit) \ + V(bsl, Bsl) \ + V(cmeq, Cmeq) \ + V(cmge, Cmge) \ + V(cmgt, Cmgt) \ + V(cmhi, Cmhi) \ + V(cmhs, Cmhs) \ + V(cmtst, Cmtst) \ + V(eor, Eor) \ + V(fabd, Fabd) \ + V(facge, Facge) \ + V(facgt, Facgt) \ + V(faddp, Faddp) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxp, Fmaxp) \ + V(fminnmp, Fminnmp) \ + V(fminp, Fminp) \ + V(fmla, Fmla) \ + V(fmlal, Fmlal) \ + V(fmlal2, Fmlal2) \ + V(fmls, Fmls) \ + V(fmlsl, Fmlsl) \ + V(fmlsl2, Fmlsl2) \ + V(fmulx, Fmulx) \ + V(frecps, Frecps) \ + V(frsqrts, Frsqrts) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(mul, Mul) \ + V(orn, Orn) \ + V(orr, Orr) \ + V(pmul, Pmul) \ + V(pmull, Pmull) \ + V(pmull2, Pmull2) \ + V(raddhn, Raddhn) \ + V(raddhn2, Raddhn2) \ + V(rsubhn, Rsubhn) \ + V(rsubhn2, Rsubhn2) \ + V(saba, Saba) \ + V(sabal, Sabal) \ + V(sabal2, Sabal2) \ + V(sabd, Sabd) \ + V(sabdl, Sabdl) \ + V(sabdl2, Sabdl2) \ + V(saddl, Saddl) \ + V(saddl2, Saddl2) \ + V(saddw, Saddw) \ + V(saddw2, Saddw2) \ + V(shadd, Shadd) \ + V(shsub, Shsub) \ + V(smax, Smax) \ + V(smaxp, Smaxp) \ + V(smin, Smin) \ + V(sminp, Sminp) \ + V(smlal, Smlal) \ + V(smlal2, Smlal2) \ + V(smlsl, Smlsl) \ + V(smlsl2, Smlsl2) \ + V(smull, Smull) \ + V(smull2, Smull2) \ + V(sqadd, Sqadd) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(sqdmulh, Sqdmulh) \ + V(sqdmull, Sqdmull) \ + V(sqdmull2, Sqdmull2) \ + V(sqrdmulh, Sqrdmulh) \ + V(sdot, Sdot) \ + V(sqrdmlah, Sqrdmlah) \ + V(udot, Udot) \ + V(sqrdmlsh, Sqrdmlsh) \ + V(sqrshl, Sqrshl) \ + V(sqshl, Sqshl) \ + V(sqsub, Sqsub) \ + V(srhadd, Srhadd) \ + V(srshl, Srshl) \ + V(sshl, Sshl) \ + V(ssubl, Ssubl) \ + V(ssubl2, Ssubl2) \ + V(ssubw, Ssubw) \ + V(ssubw2, Ssubw2) \ + V(sub, Sub) \ + V(subhn, Subhn) \ + V(subhn2, Subhn2) \ + V(trn1, Trn1) \ 
+ V(trn2, Trn2) \ + V(uaba, Uaba) \ + V(uabal, Uabal) \ + V(uabal2, Uabal2) \ + V(uabd, Uabd) \ + V(uabdl, Uabdl) \ + V(uabdl2, Uabdl2) \ + V(uaddl, Uaddl) \ + V(uaddl2, Uaddl2) \ + V(uaddw, Uaddw) \ + V(uaddw2, Uaddw2) \ + V(uhadd, Uhadd) \ + V(uhsub, Uhsub) \ + V(umax, Umax) \ + V(umaxp, Umaxp) \ + V(umin, Umin) \ + V(uminp, Uminp) \ + V(umlal, Umlal) \ + V(umlal2, Umlal2) \ + V(umlsl, Umlsl) \ + V(umlsl2, Umlsl2) \ + V(umull, Umull) \ + V(umull2, Umull2) \ + V(uqadd, Uqadd) \ + V(uqrshl, Uqrshl) \ + V(uqshl, Uqshl) \ + V(uqsub, Uqsub) \ + V(urhadd, Urhadd) \ + V(urshl, Urshl) \ + V(ushl, Ushl) \ + V(usubl, Usubl) \ + V(usubl2, Usubl2) \ + V(usubw, Usubw) \ + V(usubw2, Usubw2) \ + V(uzp1, Uzp1) \ + V(uzp2, Uzp2) \ + V(zip1, Zip1) \ + V(zip2, Zip2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, vm); \ + } + NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON 2 vector register instructions. 
+#define NEON_2VREG_MACRO_LIST(V) \ + V(abs, Abs) \ + V(addp, Addp) \ + V(addv, Addv) \ + V(cls, Cls) \ + V(clz, Clz) \ + V(cnt, Cnt) \ + V(fabs, Fabs) \ + V(faddp, Faddp) \ + V(fcvtas, Fcvtas) \ + V(fcvtau, Fcvtau) \ + V(fcvtms, Fcvtms) \ + V(fcvtmu, Fcvtmu) \ + V(fcvtns, Fcvtns) \ + V(fcvtnu, Fcvtnu) \ + V(fcvtps, Fcvtps) \ + V(fcvtpu, Fcvtpu) \ + V(fmaxnmp, Fmaxnmp) \ + V(fmaxnmv, Fmaxnmv) \ + V(fmaxp, Fmaxp) \ + V(fmaxv, Fmaxv) \ + V(fminnmp, Fminnmp) \ + V(fminnmv, Fminnmv) \ + V(fminp, Fminp) \ + V(fminv, Fminv) \ + V(fneg, Fneg) \ + V(frecpe, Frecpe) \ + V(frecpx, Frecpx) \ + V(frinta, Frinta) \ + V(frinti, Frinti) \ + V(frintm, Frintm) \ + V(frintn, Frintn) \ + V(frintp, Frintp) \ + V(frintx, Frintx) \ + V(frintz, Frintz) \ + V(frsqrte, Frsqrte) \ + V(fsqrt, Fsqrt) \ + V(mov, Mov) \ + V(mvn, Mvn) \ + V(neg, Neg) \ + V(not_, Not) \ + V(rbit, Rbit) \ + V(rev16, Rev16) \ + V(rev32, Rev32) \ + V(rev64, Rev64) \ + V(sadalp, Sadalp) \ + V(saddlp, Saddlp) \ + V(saddlv, Saddlv) \ + V(smaxv, Smaxv) \ + V(sminv, Sminv) \ + V(sqabs, Sqabs) \ + V(sqneg, Sqneg) \ + V(sqxtn, Sqxtn) \ + V(sqxtn2, Sqxtn2) \ + V(sqxtun, Sqxtun) \ + V(sqxtun2, Sqxtun2) \ + V(suqadd, Suqadd) \ + V(sxtl, Sxtl) \ + V(sxtl2, Sxtl2) \ + V(uadalp, Uadalp) \ + V(uaddlp, Uaddlp) \ + V(uaddlv, Uaddlv) \ + V(umaxv, Umaxv) \ + V(uminv, Uminv) \ + V(uqxtn, Uqxtn) \ + V(uqxtn2, Uqxtn2) \ + V(urecpe, Urecpe) \ + V(ursqrte, Ursqrte) \ + V(usqadd, Usqadd) \ + V(uxtl, Uxtl) \ + V(uxtl2, Uxtl2) \ + V(xtn, Xtn) \ + V(xtn2, Xtn2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn); \ + } + NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON 2 vector register with immediate instructions. 
+#define NEON_2VREG_FPIMM_MACRO_LIST(V) \ + V(fcmeq, Fcmeq) \ + V(fcmge, Fcmge) \ + V(fcmgt, Fcmgt) \ + V(fcmle, Fcmle) \ + V(fcmlt, Fcmlt) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, double imm) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, imm); \ + } + NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +// NEON by element instructions. +#define NEON_BYELEMENT_MACRO_LIST(V) \ + V(fmul, Fmul) \ + V(fmla, Fmla) \ + V(fmlal, Fmlal) \ + V(fmlal2, Fmlal2) \ + V(fmls, Fmls) \ + V(fmlsl, Fmlsl) \ + V(fmlsl2, Fmlsl2) \ + V(fmulx, Fmulx) \ + V(mul, Mul) \ + V(mla, Mla) \ + V(mls, Mls) \ + V(sqdmulh, Sqdmulh) \ + V(sqrdmulh, Sqrdmulh) \ + V(sdot, Sdot) \ + V(sqrdmlah, Sqrdmlah) \ + V(udot, Udot) \ + V(sqrdmlsh, Sqrdmlsh) \ + V(sqdmull, Sqdmull) \ + V(sqdmull2, Sqdmull2) \ + V(sqdmlal, Sqdmlal) \ + V(sqdmlal2, Sqdmlal2) \ + V(sqdmlsl, Sqdmlsl) \ + V(sqdmlsl2, Sqdmlsl2) \ + V(smull, Smull) \ + V(smull2, Smull2) \ + V(smlal, Smlal) \ + V(smlal2, Smlal2) \ + V(smlsl, Smlsl) \ + V(smlsl2, Smlsl2) \ + V(umull, Umull) \ + V(umull2, Umull2) \ + V(umlal, Umlal) \ + V(umlal2, Umlal2) \ + V(umlsl, Umlsl) \ + V(umlsl2, Umlsl2) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, \ + const VRegister& vn, \ + const VRegister& vm, \ + int vm_index) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, vm, vm_index); \ + } + NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + +#define NEON_2VREG_SHIFT_MACRO_LIST(V) \ + V(rshrn, Rshrn) \ + V(rshrn2, Rshrn2) \ + V(shl, Shl) \ + V(shll, Shll) \ + V(shll2, Shll2) \ + V(shrn, Shrn) \ + V(shrn2, Shrn2) \ + V(sli, Sli) \ + V(sqrshrn, Sqrshrn) \ + V(sqrshrn2, Sqrshrn2) \ + V(sqrshrun, Sqrshrun) \ + V(sqrshrun2, Sqrshrun2) \ + V(sqshl, Sqshl) \ + V(sqshlu, Sqshlu) \ + V(sqshrn, Sqshrn) \ + V(sqshrn2, 
Sqshrn2) \ + V(sqshrun, Sqshrun) \ + V(sqshrun2, Sqshrun2) \ + V(sri, Sri) \ + V(srshr, Srshr) \ + V(srsra, Srsra) \ + V(sshll, Sshll) \ + V(sshll2, Sshll2) \ + V(sshr, Sshr) \ + V(ssra, Ssra) \ + V(uqrshrn, Uqrshrn) \ + V(uqrshrn2, Uqrshrn2) \ + V(uqshl, Uqshl) \ + V(uqshrn, Uqshrn) \ + V(uqshrn2, Uqshrn2) \ + V(urshr, Urshr) \ + V(ursra, Ursra) \ + V(ushll, Ushll) \ + V(ushll2, Ushll2) \ + V(ushr, Ushr) \ + V(usra, Usra) + +#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \ + void MASM(const VRegister& vd, const VRegister& vn, int shift) { \ + VIXL_ASSERT(allow_macro_instructions_); \ + SingleEmissionCheckScope guard(this); \ + ASM(vd, vn, shift); \ + } + NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC) +#undef DEFINE_MACRO_ASM_FUNC + + void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + bic(vd, imm8, left_shift); + } + void Cmeq(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmeq(vd, vn, imm); + } + void Cmge(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmge(vd, vn, imm); + } + void Cmgt(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmgt(vd, vn, imm); + } + void Cmle(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmle(vd, vn, imm); + } + void Cmlt(const VRegister& vd, const VRegister& vn, int imm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + cmlt(vd, vn, imm); + } + void Dup(const VRegister& vd, const VRegister& vn, int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dup(vd, vn, index); + } + void Dup(const VRegister& vd, 
const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + dup(vd, rn); + } + void Ext(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ext(vd, vn, vm, index); + } + void Fcadd(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcadd(vd, vn, vm, rot); + } + void Fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int vm_index, + int rot) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmla(vd, vn, vm, vm_index, rot); + } + void Fcmla(const VRegister& vd, + const VRegister& vn, + const VRegister& vm, + int rot) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcmla(vd, vn, vm, rot); + } + void Ins(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ins(vd, vd_index, vn, vn_index); + } + void Ins(const VRegister& vd, int vd_index, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ins(vd, vd_index, rn); + } + void Ld1(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, src); + } + void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, src); + } + void Ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, vt3, src); + } + void Ld1(const VRegister& vt, + const VRegister& vt2, + const VRegister& 
vt3, + const VRegister& vt4, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, vt2, vt3, vt4, src); + } + void Ld1(const VRegister& vt, int lane, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1(vt, lane, src); + } + void Ld1r(const VRegister& vt, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld1r(vt, src); + } + void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2(vt, vt2, src); + } + void Ld2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2(vt, vt2, lane, src); + } + void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld2r(vt, vt2, src); + } + void Ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3(vt, vt2, vt3, src); + } + void Ld3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3(vt, vt2, vt3, lane, src); + } + void Ld3r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld3r(vt, vt2, vt3, src); + } + void Ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4(vt, vt2, 
vt3, vt4, src); + } + void Ld4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4(vt, vt2, vt3, vt4, lane, src); + } + void Ld4r(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& src) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ld4r(vt, vt2, vt3, vt4, src); + } + void Mov(const VRegister& vd, + int vd_index, + const VRegister& vn, + int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(vd, vd_index, vn, vn_index); + } + void Mov(const VRegister& vd, const VRegister& vn, int index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(vd, vn, index); + } + void Mov(const VRegister& vd, int vd_index, const Register& rn) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(vd, vd_index, rn); + } + void Mov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mov(rd, vn, vn_index); + } + void Movi(const VRegister& vd, + uint64_t imm, + Shift shift = LSL, + int shift_amount = 0); + void Movi(const VRegister& vd, uint64_t hi, uint64_t lo); + void Mvni(const VRegister& vd, + const int imm8, + Shift shift = LSL, + const int shift_amount = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + mvni(vd, imm8, shift, shift_amount); + } + void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + orr(vd, imm8, left_shift); + } + void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope 
guard(this); + scvtf(vd, vn, fbits); + } + void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + ucvtf(vd, vn, fbits); + } + void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtzs(vd, vn, fbits); + } + void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + fcvtzu(vd, vn, fbits); + } + void St1(const VRegister& vt, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, dst); + } + void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, vt2, dst); + } + void St1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, vt2, vt3, dst); + } + void St1(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, vt2, vt3, vt4, dst); + } + void St1(const VRegister& vt, int lane, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st1(vt, lane, dst); + } + void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2(vt, vt2, dst); + } + void St3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3(vt, vt2, vt3, dst); + } + void St4(const 
VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4(vt, vt2, vt3, vt4, dst); + } + void St2(const VRegister& vt, + const VRegister& vt2, + int lane, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st2(vt, vt2, lane, dst); + } + void St3(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + int lane, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st3(vt, vt2, vt3, lane, dst); + } + void St4(const VRegister& vt, + const VRegister& vt2, + const VRegister& vt3, + const VRegister& vt4, + int lane, + const MemOperand& dst) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + st4(vt, vt2, vt3, vt4, lane, dst); + } + void Smov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + smov(rd, vn, vn_index); + } + void Umov(const Register& rd, const VRegister& vn, int vn_index) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + umov(rd, vn, vn_index); + } + void Crc32b(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32b(rd, rn, rm); + } + void Crc32h(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32h(rd, rn, rm); + } + void Crc32w(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32w(rd, rn, rm); + } + void Crc32x(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + 
SingleEmissionCheckScope guard(this); + crc32x(rd, rn, rm); + } + void Crc32cb(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32cb(rd, rn, rm); + } + void Crc32ch(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32ch(rd, rn, rm); + } + void Crc32cw(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32cw(rd, rn, rm); + } + void Crc32cx(const Register& rd, const Register& rn, const Register& rm) { + VIXL_ASSERT(allow_macro_instructions_); + SingleEmissionCheckScope guard(this); + crc32cx(rd, rn, rm); + } + + template + Literal* CreateLiteralDestroyedWithPool(T value) { + return new Literal(value, + &literal_pool_, + RawLiteral::kDeletedOnPoolDestruction); + } + + template + Literal* CreateLiteralDestroyedWithPool(T high64, T low64) { + return new Literal(high64, + low64, + &literal_pool_, + RawLiteral::kDeletedOnPoolDestruction); + } + + // Push the system stack pointer (sp) down to allow the same to be done to + // the current stack pointer (according to StackPointer()). This must be + // called _before_ accessing the memory. + // + // This is necessary when pushing or otherwise adding things to the stack, to + // satisfy the AAPCS64 constraint that the memory below the system stack + // pointer is not accessed. + // + // This method asserts that StackPointer() is not sp, since the call does + // not make sense in that context. + // + // TODO: This method can only accept values of 'space' that can be encoded in + // one instruction. Refer to the implementation for details. 
+ void BumpSystemStackPointer(const Operand& space); + + virtual bool AllowMacroInstructions() const VIXL_OVERRIDE { + return allow_macro_instructions_; + } + + virtual bool ArePoolsBlocked() const VIXL_OVERRIDE { + return IsLiteralPoolBlocked() && IsVeneerPoolBlocked(); + } + + void SetGenerateSimulatorCode(bool value) { + generate_simulator_code_ = value; + } + + bool GenerateSimulatorCode() const { return generate_simulator_code_; } + + size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); } + VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) { + return GetLiteralPoolSize(); + } + + size_t GetLiteralPoolMaxSize() const { return literal_pool_.GetMaxSize(); } + VIXL_DEPRECATED("GetLiteralPoolMaxSize", size_t LiteralPoolMaxSize() const) { + return GetLiteralPoolMaxSize(); + } + + size_t GetVeneerPoolMaxSize() const { return veneer_pool_.GetMaxSize(); } + VIXL_DEPRECATED("GetVeneerPoolMaxSize", size_t VeneerPoolMaxSize() const) { + return GetVeneerPoolMaxSize(); + } + + // The number of unresolved branches that may require a veneer. 
+ int GetNumberOfPotentialVeneers() const { + return veneer_pool_.GetNumberOfPotentialVeneers(); + } + VIXL_DEPRECATED("GetNumberOfPotentialVeneers", + int NumberOfPotentialVeneers() const) { + return GetNumberOfPotentialVeneers(); + } + + ptrdiff_t GetNextCheckPoint() const { + ptrdiff_t next_checkpoint_for_pools = + std::min(literal_pool_.GetCheckpoint(), veneer_pool_.GetCheckpoint()); + return std::min(next_checkpoint_for_pools, + static_cast(GetBuffer().GetCapacity())); + } + VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) { + return GetNextCheckPoint(); + } + + void EmitLiteralPool(LiteralPool::EmitOption option) { + if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option); + + checkpoint_ = GetNextCheckPoint(); + recommended_checkpoint_ = literal_pool_.GetNextRecommendedCheckpoint(); + } + + void CheckEmitFor(size_t amount); + void EnsureEmitFor(size_t amount) { + ptrdiff_t offset = amount; + ptrdiff_t max_pools_size = + literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize(); + ptrdiff_t cursor = GetCursorOffset(); + if ((cursor >= recommended_checkpoint_) || + ((cursor + offset + max_pools_size) >= checkpoint_)) { + CheckEmitFor(amount); + } + } + + void CheckEmitPoolsFor(size_t amount); + virtual void EnsureEmitPoolsFor(size_t amount) VIXL_OVERRIDE { + ptrdiff_t offset = amount; + ptrdiff_t max_pools_size = + literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize(); + ptrdiff_t cursor = GetCursorOffset(); + if ((cursor >= recommended_checkpoint_) || + ((cursor + offset + max_pools_size) >= checkpoint_)) { + CheckEmitPoolsFor(amount); + } + } + + // Set the current stack pointer, but don't generate any code. + void SetStackPointer(const Register& stack_pointer) { + VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(stack_pointer)); + sp_ = stack_pointer; + } + + // Return the current stack pointer, as set by SetStackPointer. 
+ const Register& StackPointer() const { return sp_; } + + CPURegList* GetScratchRegisterList() { return &tmp_list_; } + VIXL_DEPRECATED("GetScratchRegisterList", CPURegList* TmpList()) { + return GetScratchRegisterList(); + } + + CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; } + VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) { + return GetScratchFPRegisterList(); + } + + // Get or set the current (most-deeply-nested) UseScratchRegisterScope. + void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) { + current_scratch_scope_ = scope; + } + UseScratchRegisterScope* GetCurrentScratchRegisterScope() { + return current_scratch_scope_; + } + + // Like printf, but print at run-time from generated code. + // + // The caller must ensure that arguments for floating-point placeholders + // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that + // arguments for integer placeholders are Registers. + // + // At the moment it is only possible to print the value of sp if it is the + // current stack pointer. Otherwise, the MacroAssembler will automatically + // update sp on every push (using BumpSystemStackPointer), so determining its + // value is difficult. + // + // Format placeholders that refer to more than one argument, or to a specific + // argument, are not supported. This includes formats like "%1$d" or "%.*d". + // + // This function automatically preserves caller-saved registers so that + // calling code can use Printf at any point without having to worry about + // corruption. The preservation mechanism generates a lot of code. If this is + // a problem, preserve the important registers manually and then call + // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are + // implicitly preserved. 
+ void Printf(const char* format, + CPURegister arg0 = NoCPUReg, + CPURegister arg1 = NoCPUReg, + CPURegister arg2 = NoCPUReg, + CPURegister arg3 = NoCPUReg); + + // Like Printf, but don't preserve any caller-saved registers, not even 'lr'. + // + // The return code from the system printf call will be returned in x0. + void PrintfNoPreserve(const char* format, + const CPURegister& arg0 = NoCPUReg, + const CPURegister& arg1 = NoCPUReg, + const CPURegister& arg2 = NoCPUReg, + const CPURegister& arg3 = NoCPUReg); + + // Trace control when running the debug simulator. + // + // For example: + // + // __ Trace(LOG_REGS, TRACE_ENABLE); + // Will add registers to the trace if it wasn't already the case. + // + // __ Trace(LOG_DISASM, TRACE_DISABLE); + // Will stop logging disassembly. It has no effect if the disassembly wasn't + // already being logged. + void Trace(TraceParameters parameters, TraceCommand command); + + // Log the requested data independently of what is being traced. + // + // For example: + // + // __ Log(LOG_FLAGS) + // Will output the flags. + void Log(TraceParameters parameters); + + // Enable or disable instrumentation when an Instrument visitor is attached to + // the simulator. + void EnableInstrumentation(); + void DisableInstrumentation(); + + // Add a marker to the instrumentation data produced by an Instrument visitor. + // The name is a two character string that will be attached to the marker in + // the output data. + void AnnotateInstrumentation(const char* marker_name); + + // Enable or disable CPU features dynamically. This mechanism allows users to + // strictly check the use of CPU features in different regions of code. 
+ void SetSimulatorCPUFeatures(const CPUFeatures& features); + void EnableSimulatorCPUFeatures(const CPUFeatures& features); + void DisableSimulatorCPUFeatures(const CPUFeatures& features); + void SaveSimulatorCPUFeatures(); + void RestoreSimulatorCPUFeatures(); + + LiteralPool* GetLiteralPool() { return &literal_pool_; } + +// Support for simulated runtime calls. + +// `CallRuntime` requires variadic templating, that is only available from +// C++11. +#if __cplusplus >= 201103L +#define VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT +#endif // #if __cplusplus >= 201103L + +#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT + template + void CallRuntimeHelper(R (*function)(P...), RuntimeCallType call_type); + + template + void CallRuntime(R (*function)(P...)) { + CallRuntimeHelper(function, kCallRuntime); + } + + template + void TailCallRuntime(R (*function)(P...)) { + CallRuntimeHelper(function, kTailCallRuntime); + } +#endif // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT + + protected: + void BlockLiteralPool() { literal_pool_.Block(); } + void ReleaseLiteralPool() { literal_pool_.Release(); } + bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); } + void BlockVeneerPool() { veneer_pool_.Block(); } + void ReleaseVeneerPool() { veneer_pool_.Release(); } + bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); } + + virtual void BlockPools() VIXL_OVERRIDE { + BlockLiteralPool(); + BlockVeneerPool(); + } + + virtual void ReleasePools() VIXL_OVERRIDE { + ReleaseLiteralPool(); + ReleaseVeneerPool(); + } + + // The scopes below need to able to block and release a particular pool. + // TODO: Consider removing those scopes or move them to + // code-generation-scopes-vixl.h. 
+ friend class BlockPoolsScope; + friend class BlockLiteralPoolScope; + friend class BlockVeneerPoolScope; + + virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE { + allow_macro_instructions_ = value; + } + + // Helper used to query information about code generation and to generate + // code for `csel`. + // Here and for the related helpers below: + // - Code is generated when `masm` is not `NULL`. + // - On return and when set, `should_synthesise_left` and + // `should_synthesise_right` will indicate whether `left` and `right` + // should be synthesized in a temporary register. + static void CselHelper(MacroAssembler* masm, + const Register& rd, + Operand left, + Operand right, + Condition cond, + bool* should_synthesise_left = NULL, + bool* should_synthesise_right = NULL); + + // The helper returns `true` if it can handle the specified arguments. + // Also see comments for `CselHelper()`. + static bool CselSubHelperTwoImmediates(MacroAssembler* masm, + const Register& rd, + int64_t left, + int64_t right, + Condition cond, + bool* should_synthesise_left, + bool* should_synthesise_right); + + // See comments for `CselHelper()`. + static bool CselSubHelperTwoOrderedImmediates(MacroAssembler* masm, + const Register& rd, + int64_t left, + int64_t right, + Condition cond); + + // See comments for `CselHelper()`. + static void CselSubHelperRightSmallImmediate(MacroAssembler* masm, + UseScratchRegisterScope* temps, + const Register& rd, + const Operand& left, + const Operand& right, + Condition cond, + bool* should_synthesise_left); + + private: + // The actual Push and Pop implementations. These don't generate any code + // other than that required for the push or pop. This allows + // (Push|Pop)CPURegList to bundle together setup code for a large block of + // registers. + // + // Note that size is per register, and is specified in bytes. 
+ void PushHelper(int count, + int size, + const CPURegister& src0, + const CPURegister& src1, + const CPURegister& src2, + const CPURegister& src3); + void PopHelper(int count, + int size, + const CPURegister& dst0, + const CPURegister& dst1, + const CPURegister& dst2, + const CPURegister& dst3); + + void Movi16bitHelper(const VRegister& vd, uint64_t imm); + void Movi32bitHelper(const VRegister& vd, uint64_t imm); + void Movi64bitHelper(const VRegister& vd, uint64_t imm); + + // Perform necessary maintenance operations before a push or pop. + // + // Note that size is per register, and is specified in bytes. + void PrepareForPush(int count, int size); + void PrepareForPop(int count, int size); + + // The actual implementation of load and store operations for CPURegList. + enum LoadStoreCPURegListAction { kLoad, kStore }; + void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation, + CPURegList registers, + const MemOperand& mem); + // Returns a MemOperand suitable for loading or storing a CPURegList at `dst`. + // This helper may allocate registers from `scratch_scope` and generate code + // to compute an intermediate address. The resulting MemOperand is only valid + // as long as `scratch_scope` remains valid. + MemOperand BaseMemOperandForLoadStoreCPURegList( + const CPURegList& registers, + const MemOperand& mem, + UseScratchRegisterScope* scratch_scope); + + bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) { + return !Instruction::IsValidImmPCOffset(branch_type, + label->GetLocation() - + GetCursorOffset()); + } + + void ConfigureSimulatorCPUFeaturesHelper(const CPUFeatures& features, + DebugHltOpcode action); + + // Tell whether any of the macro instruction can be used. When false the + // MacroAssembler will assert if a method which can emit a variable number + // of instructions is called. + bool allow_macro_instructions_; + + // Indicates whether we should generate simulator or native code. 
+ bool generate_simulator_code_; + + // The register to use as a stack pointer for stack operations. + Register sp_; + + // Scratch registers available for use by the MacroAssembler. + CPURegList tmp_list_; + CPURegList fptmp_list_; + + UseScratchRegisterScope* current_scratch_scope_; + + LiteralPool literal_pool_; + VeneerPool veneer_pool_; + + ptrdiff_t checkpoint_; + ptrdiff_t recommended_checkpoint_; + + friend class Pool; + friend class LiteralPool; +}; + + +inline size_t VeneerPool::GetOtherPoolsMaxSize() const { + return masm_->GetLiteralPoolMaxSize(); +} + + +inline size_t LiteralPool::GetOtherPoolsMaxSize() const { + return masm_->GetVeneerPoolMaxSize(); +} + + +inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) { + masm_->recommended_checkpoint_ = + std::min(masm_->recommended_checkpoint_, offset); + recommended_checkpoint_ = offset; +} + +class InstructionAccurateScope : public ExactAssemblyScope { + public: + VIXL_DEPRECATED("ExactAssemblyScope", + InstructionAccurateScope(MacroAssembler* masm, + int64_t count, + SizePolicy size_policy = kExactSize)) + : ExactAssemblyScope(masm, count * kInstructionSize, size_policy) {} +}; + +class BlockLiteralPoolScope { + public: + explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) { + masm_->BlockLiteralPool(); + } + + ~BlockLiteralPoolScope() { masm_->ReleaseLiteralPool(); } + + private: + MacroAssembler* masm_; +}; + + +class BlockVeneerPoolScope { + public: + explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) { + masm_->BlockVeneerPool(); + } + + ~BlockVeneerPoolScope() { masm_->ReleaseVeneerPool(); } + + private: + MacroAssembler* masm_; +}; + + +class BlockPoolsScope { + public: + explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) { + masm_->BlockPools(); + } + + ~BlockPoolsScope() { masm_->ReleasePools(); } + + private: + MacroAssembler* masm_; +}; + + +// This scope utility allows scratch registers to be managed safely. 
The
+// MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) is
+// used as a pool of scratch registers. These registers can be allocated on
+// demand, and will be returned at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
+class UseScratchRegisterScope {
+ public:
+  // This constructor implicitly calls `Open` to initialise the scope (`masm`
+  // must not be `NULL`), so it is ready to use immediately after it has been
+  // constructed.
+  explicit UseScratchRegisterScope(MacroAssembler* masm)
+      : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {
+    Open(masm);
+  }
+  // This constructor does not implicitly initialise the scope. Instead, the
+  // user is required to explicitly call the `Open` function before using the
+  // scope.
+  UseScratchRegisterScope()
+      : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {}
+
+  // This function performs the actual initialisation work.
+  void Open(MacroAssembler* masm);
+
+  // The destructor always implicitly calls the `Close` function.
+  ~UseScratchRegisterScope() { Close(); }
+
+  // This function performs the cleaning-up work. It must succeed even if the
+  // scope has not been opened. It is safe to call multiple times.
+  void Close();
+
+
+  bool IsAvailable(const CPURegister& reg) const;
+
+
+  // Take a register from the appropriate temps list. It will be returned
+  // automatically when the scope ends.
+  Register AcquireW() {
+    return AcquireNextAvailable(masm_->GetScratchRegisterList()).W();
+  }
+  Register AcquireX() {
+    return AcquireNextAvailable(masm_->GetScratchRegisterList()).X();
+  }
+  VRegister AcquireH() {
+    return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).H();
+  }
+  VRegister AcquireS() {
+    return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).S();
+  }
+  VRegister AcquireD() {
+    return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).D();
+  }
+
+
+  Register AcquireRegisterOfSize(int size_in_bits);
+  Register AcquireSameSizeAs(const Register& reg) {
+    return AcquireRegisterOfSize(reg.GetSizeInBits());
+  }
+  VRegister AcquireVRegisterOfSize(int size_in_bits);
+  VRegister AcquireSameSizeAs(const VRegister& reg) {
+    return AcquireVRegisterOfSize(reg.GetSizeInBits());
+  }
+  // Prefer a general-purpose scratch register; fall back to a V register
+  // only when the general-purpose scratch list is empty.
+  CPURegister AcquireCPURegisterOfSize(int size_in_bits) {
+    return masm_->GetScratchRegisterList()->IsEmpty()
+               ? CPURegister(AcquireVRegisterOfSize(size_in_bits))
+               : CPURegister(AcquireRegisterOfSize(size_in_bits));
+  }
+
+
+  // Explicitly release an acquired (or excluded) register, putting it back in
+  // the appropriate temps list.
+  void Release(const CPURegister& reg);
+
+
+  // Make the specified registers available as scratch registers for the
+  // duration of this scope.
+  void Include(const CPURegList& list);
+  void Include(const Register& reg1,
+               const Register& reg2 = NoReg,
+               const Register& reg3 = NoReg,
+               const Register& reg4 = NoReg);
+  void Include(const VRegister& reg1,
+               const VRegister& reg2 = NoVReg,
+               const VRegister& reg3 = NoVReg,
+               const VRegister& reg4 = NoVReg);
+
+
+  // Make sure that the specified registers are not available in this scope.
+  // This can be used to prevent helper functions from using sensitive
+  // registers, for example.
+  void Exclude(const CPURegList& list);
+  void Exclude(const Register& reg1,
+               const Register& reg2 = NoReg,
+               const Register& reg3 = NoReg,
+               const Register& reg4 = NoReg);
+  void Exclude(const VRegister& reg1,
+               const VRegister& reg2 = NoVReg,
+               const VRegister& reg3 = NoVReg,
+               const VRegister& reg4 = NoVReg);
+  void Exclude(const CPURegister& reg1,
+               const CPURegister& reg2 = NoCPUReg,
+               const CPURegister& reg3 = NoCPUReg,
+               const CPURegister& reg4 = NoCPUReg);
+
+
+  // Prevent any scratch registers from being used in this scope.
+  void ExcludeAll();
+
+ private:
+  static CPURegister AcquireNextAvailable(CPURegList* available);
+
+  static void ReleaseByCode(CPURegList* available, int code);
+
+  static void ReleaseByRegList(CPURegList* available, RegList regs);
+
+  static void IncludeByRegList(CPURegList* available, RegList exclude);
+
+  static void ExcludeByRegList(CPURegList* available, RegList exclude);
+
+  // The MacroAssembler maintains a list of available scratch registers, and
+  // also keeps track of the most recently-opened scope so that on destruction
+  // we can check that scopes do not outlive their parents.
+  MacroAssembler* masm_;
+  UseScratchRegisterScope* parent_;
+
+  // The state of the available lists at the start of this scope.
+  RegList old_available_;    // kRegister
+  RegList old_availablefp_;  // kVRegister
+
+  // Disallow copy constructor and operator=.
+  VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
+    VIXL_UNREACHABLE();
+  }
+  VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
+    VIXL_UNREACHABLE();
+  }
+};
+
+
+// Like CPUFeaturesScope, but also generate Simulation pseudo-instructions to
+// control a Simulator's CPUFeatures dynamically.
+//
+// One major difference from CPUFeaturesScope is that this scope cannot offer
+// a writable "CPUFeatures* GetCPUFeatures()", because every write to the
+// features needs a corresponding macro instruction.
+class SimulationCPUFeaturesScope {
+ public:
+  // Save the simulator's current feature state, then enable the listed
+  // features (the nested CPUFeaturesScope also enables them for assembly-time
+  // checking). The destructor restores the saved simulator state.
+  explicit SimulationCPUFeaturesScope(
+      MacroAssembler* masm,
+      CPUFeatures::Feature feature0 = CPUFeatures::kNone,
+      CPUFeatures::Feature feature1 = CPUFeatures::kNone,
+      CPUFeatures::Feature feature2 = CPUFeatures::kNone,
+      CPUFeatures::Feature feature3 = CPUFeatures::kNone)
+      : masm_(masm),
+        cpu_features_scope_(masm, feature0, feature1, feature2, feature3) {
+    masm_->SaveSimulatorCPUFeatures();
+    masm_->EnableSimulatorCPUFeatures(
+        CPUFeatures(feature0, feature1, feature2, feature3));
+  }
+
+  // As above, but take a whole CPUFeatures set.
+  SimulationCPUFeaturesScope(MacroAssembler* masm, const CPUFeatures& other)
+      : masm_(masm), cpu_features_scope_(masm, other) {
+    masm_->SaveSimulatorCPUFeatures();
+    masm_->EnableSimulatorCPUFeatures(other);
+  }
+
+  ~SimulationCPUFeaturesScope() { masm_->RestoreSimulatorCPUFeatures(); }
+
+  // Read-only access; see the class comment for why no writable accessor
+  // is provided.
+  const CPUFeatures* GetCPUFeatures() const {
+    return cpu_features_scope_.GetCPUFeatures();
+  }
+
+  // Replace the feature set, updating both the assembly-time scope and the
+  // simulator (the latter via an emitted pseudo-instruction).
+  void SetCPUFeatures(const CPUFeatures& cpu_features) {
+    cpu_features_scope_.SetCPUFeatures(cpu_features);
+    masm_->SetSimulatorCPUFeatures(cpu_features);
+  }
+
+ private:
+  MacroAssembler* masm_;
+  CPUFeaturesScope cpu_features_scope_;
+};
+
+
+// Variadic templating is only available from C++11.
+#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
+
+// `R` stands for 'return type', and `P` for 'parameter types'.
+// Emit a call (or tail call) to the C++ `function`.
+// - Simulator codegen: emit an HLT pseudo-instruction followed by in-line
+//   data (wrapper address, target address, call type) that the Simulator
+//   decodes to perform the call; the asserted offsets pin the exact layout.
+// - Native codegen: materialise the target address in a scratch register and
+//   branch to it (Blr for a call, Br for a tail call).
+// NOTE(review): the template parameter list appears to have been lost in
+// extraction (`template` with no `<...>`; likewise the `reinterpret_cast`
+// type arguments below). Upstream VIXL declares this as
+// `template <typename R, typename... P>` - confirm against the original patch.
+template
+void MacroAssembler::CallRuntimeHelper(R (*function)(P...),
+                                       RuntimeCallType call_type) {
+  if (generate_simulator_code_) {
+#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
+    uintptr_t runtime_call_wrapper_address = reinterpret_cast(
+        &(Simulator::RuntimeCallStructHelper::Wrapper));
+    uintptr_t function_address = reinterpret_cast(function);
+
+    // Reserve an exactly-sized region so the offset assertions below hold.
+    EmissionCheckScope guard(this,
+                             kRuntimeCallLength,
+                             CodeBufferCheckScope::kExactSize);
+    Label start;
+    bind(&start);
+    {
+      ExactAssemblyScope scope(this, kInstructionSize);
+      hlt(kRuntimeCallOpcode);
+    }
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
+                kRuntimeCallWrapperOffset);
+    dc(runtime_call_wrapper_address);
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
+                kRuntimeCallFunctionOffset);
+    dc(function_address);
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallTypeOffset);
+    dc32(call_type);
+    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallLength);
+#else
+    VIXL_UNREACHABLE();
+#endif  // #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
+  } else {
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+    Mov(temp, reinterpret_cast(function));
+    if (call_type == kTailCallRuntime) {
+      // Tail call: do not link; the callee returns to our caller.
+      Br(temp);
+    } else {
+      VIXL_ASSERT(call_type == kCallRuntime);
+      Blr(temp);
+    }
+  }
+}
+
+#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
+
+}  // namespace aarch64
+
+// Required InvalSet template specialisations.
+// TODO: These template specialisations should not live in this file. Move
+// VeneerPool out of the aarch64 namespace in order to share its implementation
+// later.
+// Key accessors for the veneer pool's InvalSet of pending branches; the key
+// is the first PC from which the branch can no longer reach its target
+// ("unreacheable" spelling matches the VeneerPool::BranchInfo field name).
+// NOTE(review): `InvalSet::` below appears to have lost its template argument
+// list during extraction; upstream VIXL spells the arguments out fully -
+// confirm against the original patch.
+template <>
+inline ptrdiff_t InvalSet::
+    GetKey(const aarch64::VeneerPool::BranchInfo& branch_info) {
+  return branch_info.first_unreacheable_pc_;
+}
+template <>
+inline void InvalSet::
+    SetKey(aarch64::VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
+  branch_info->first_unreacheable_pc_ = key;
+}
+
+}  // namespace vixl
+
+#endif  // VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.cc
new file mode 100644
index 00000000..20364616
--- /dev/null
+++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.cc
@@ -0,0 +1,528 @@
+// Copyright 2016, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "operands-aarch64.h"
+
+namespace vixl {
+namespace aarch64 {
+
+// CPURegList utilities.
+
+// Remove and return the lowest-numbered register in the list, or NoCPUReg if
+// the list is empty.
+CPURegister CPURegList::PopLowestIndex() {
+  if (IsEmpty()) {
+    return NoCPUReg;
+  }
+  int index = CountTrailingZeros(list_);
+  // NOTE(review): `1 << index` is a 32-bit shift but the register list is 64
+  // bits wide, so for index >= 31 this assertion is wrong (UB for >= 32);
+  // later upstream uses `static_cast<RegList>(1) << index`. Also, unlike
+  // PopHighestIndex(), no `VIXL_ASSERT(IsValid())` here - confirm intended.
+  VIXL_ASSERT((1 << index) & list_);
+  Remove(index);
+  return CPURegister(index, size_, type_);
+}
+
+
+// Remove and return the highest-numbered register in the list, or NoCPUReg if
+// the list is empty.
+CPURegister CPURegList::PopHighestIndex() {
+  VIXL_ASSERT(IsValid());
+  if (IsEmpty()) {
+    return NoCPUReg;
+  }
+  int index = CountLeadingZeros(list_);
+  index = kRegListSizeInBits - 1 - index;
+  // NOTE(review): same 32-bit-shift concern as in PopLowestIndex().
+  VIXL_ASSERT((1 << index) & list_);
+  Remove(index);
+  return CPURegister(index, size_, type_);
+}
+
+
+// A list is valid if every set bit denotes a constructible register of the
+// list's type and size, or if it is the empty kNoRegister list.
+bool CPURegList::IsValid() const {
+  if ((type_ == CPURegister::kRegister) || (type_ == CPURegister::kVRegister)) {
+    bool is_valid = true;
+    // Try to create a CPURegister for each element in the list.
+    for (int i = 0; i < kRegListSizeInBits; i++) {
+      if (((list_ >> i) & 1) != 0) {
+        is_valid &= CPURegister(i, size_, type_).IsValid();
+      }
+    }
+    return is_valid;
+  } else if (type_ == CPURegister::kNoRegister) {
+    // We can't use IsEmpty here because that asserts IsValid().
+    return list_ == 0;
+  } else {
+    return false;
+  }
+}
+
+
+// Strip the AAPCS64 callee-saved registers of this list's type from the list.
+void CPURegList::RemoveCalleeSaved() {
+  if (GetType() == CPURegister::kRegister) {
+    Remove(GetCalleeSaved(GetRegisterSizeInBits()));
+  } else if (GetType() == CPURegister::kVRegister) {
+    Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
+  } else {
+    VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
+    VIXL_ASSERT(IsEmpty());
+    // The list must already be empty, so do nothing.
+  }
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+                             const CPURegList& list_2,
+                             const CPURegList& list_3) {
+  return Union(list_1, Union(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+                             const CPURegList& list_2,
+                             const CPURegList& list_3,
+                             const CPURegList& list_4) {
+  return Union(Union(list_1, list_2), Union(list_3, list_4));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+                                    const CPURegList& list_2,
+                                    const CPURegList& list_3) {
+  return Intersection(list_1, Intersection(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+                                    const CPURegList& list_2,
+                                    const CPURegList& list_3,
+                                    const CPURegList& list_4) {
+  return Intersection(Intersection(list_1, list_2),
+                      Intersection(list_3, list_4));
+}
+
+
+// AAPCS64 callee-saved general-purpose registers: x19-x29.
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+  return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+// AAPCS64 callee-saved SIMD/FP registers: v8-v15.
+CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
+  return CPURegList(CPURegister::kVRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+  // Registers x0-x18 and lr (x30) are caller-saved.
+  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+  // Do not use lr directly to avoid initialisation order fiasco bugs for users.
+  list.Combine(Register(30, kXRegSize));
+  return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedV(unsigned size) {
+  // Registers d0-d7 and d16-d31 are caller-saved.
+  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
+  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
+  return list;
+}
+
+
+const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
+const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
+const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
+const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
+
+
+// Registers.
+// Static lookup tables mapping a register code to the canonical Register /
+// VRegister object for each view, expanded from AARCH64_REGISTER_CODE_LIST.
+#define WREG(n) w##n,
+const Register Register::wregisters[] = {AARCH64_REGISTER_CODE_LIST(WREG)};
+#undef WREG
+
+#define XREG(n) x##n,
+const Register Register::xregisters[] = {AARCH64_REGISTER_CODE_LIST(XREG)};
+#undef XREG
+
+#define BREG(n) b##n,
+const VRegister VRegister::bregisters[] = {AARCH64_REGISTER_CODE_LIST(BREG)};
+#undef BREG
+
+#define HREG(n) h##n,
+const VRegister VRegister::hregisters[] = {AARCH64_REGISTER_CODE_LIST(HREG)};
+#undef HREG
+
+#define SREG(n) s##n,
+const VRegister VRegister::sregisters[] = {AARCH64_REGISTER_CODE_LIST(SREG)};
+#undef SREG
+
+#define DREG(n) d##n,
+const VRegister VRegister::dregisters[] = {AARCH64_REGISTER_CODE_LIST(DREG)};
+#undef DREG
+
+#define QREG(n) q##n,
+const VRegister VRegister::qregisters[] = {AARCH64_REGISTER_CODE_LIST(QREG)};
+#undef QREG
+
+#define VREG(n) v##n,
+const VRegister VRegister::vregisters[] = {AARCH64_REGISTER_CODE_LIST(VREG)};
+#undef VREG
+
+
+// Map a register code to its W view; kSPRegInternalCode maps to wsp.
+const Register& Register::GetWRegFromCode(unsigned code) {
+  if (code == kSPRegInternalCode) {
+    return wsp;
+  } else {
+    VIXL_ASSERT(code < kNumberOfRegisters);
+    return wregisters[code];
+  }
+}
+
+
+// Map a register code to its X view; kSPRegInternalCode maps to sp.
+const Register& Register::GetXRegFromCode(unsigned code) {
+  if (code == kSPRegInternalCode) {
+    return sp;
+  } else {
+    VIXL_ASSERT(code < kNumberOfRegisters);
+    return xregisters[code];
+  }
+}
+
+
+const VRegister& VRegister::GetBRegFromCode(unsigned code) {
+  VIXL_ASSERT(code < kNumberOfVRegisters);
+  return bregisters[code];
+}
+
+
+const VRegister& VRegister::GetHRegFromCode(unsigned code) {
+  VIXL_ASSERT(code < kNumberOfVRegisters);
+  return hregisters[code];
+}
+
+
+const VRegister& VRegister::GetSRegFromCode(unsigned code) {
+  VIXL_ASSERT(code < kNumberOfVRegisters);
+  return sregisters[code];
+}
+
+
+const VRegister& VRegister::GetDRegFromCode(unsigned code) {
+  VIXL_ASSERT(code < kNumberOfVRegisters);
+  return dregisters[code];
+}
+
+
+const VRegister& VRegister::GetQRegFromCode(unsigned code) {
+  VIXL_ASSERT(code < kNumberOfVRegisters);
+  return qregisters[code];
+}
+
+
+const VRegister& VRegister::GetVRegFromCode(unsigned code) {
+  VIXL_ASSERT(code < kNumberOfVRegisters);
+  return vregisters[code];
+}
+
+
+// View accessors: reinterpret this register's code as a specific register
+// type/size; the assertions require the register kind to match the view.
+const Register& CPURegister::W() const {
+  VIXL_ASSERT(IsValidRegister());
+  return Register::GetWRegFromCode(code_);
+}
+
+
+const Register& CPURegister::X() const {
+  VIXL_ASSERT(IsValidRegister());
+  return Register::GetXRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::B() const {
+  VIXL_ASSERT(IsValidVRegister());
+  return VRegister::GetBRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::H() const {
+  VIXL_ASSERT(IsValidVRegister());
+  return VRegister::GetHRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::S() const {
+  VIXL_ASSERT(IsValidVRegister());
+  return VRegister::GetSRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::D() const {
+  VIXL_ASSERT(IsValidVRegister());
+  return VRegister::GetDRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::Q() const {
+  VIXL_ASSERT(IsValidVRegister());
+  return VRegister::GetQRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::V() const {
+  VIXL_ASSERT(IsValidVRegister());
+  return VRegister::GetVRegFromCode(code_);
+}
+
+
+// Operand.
+// Immediate operand.
+Operand::Operand(int64_t immediate)
+    : immediate_(immediate),
+      reg_(NoReg),
+      shift_(NO_SHIFT),
+      extend_(NO_EXTEND),
+      shift_amount_(0) {}
+
+
+// Shifted-register operand (reg, LSL/LSR/ASR/ROR #amount); MSL and sp are
+// rejected, and the shift amount must fit the register width.
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+    : reg_(reg),
+      shift_(shift),
+      extend_(NO_EXTEND),
+      shift_amount_(shift_amount) {
+  VIXL_ASSERT(shift != MSL);
+  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
+  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
+  VIXL_ASSERT(!reg.IsSP());
+}
+
+
+// Extended-register operand (reg, UXTB..SXTX #amount), amount 0-4.
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+    : reg_(reg),
+      shift_(NO_SHIFT),
+      extend_(extend),
+      shift_amount_(shift_amount) {
+  VIXL_ASSERT(reg.IsValid());
+  VIXL_ASSERT(shift_amount <= 4);
+  VIXL_ASSERT(!reg.IsSP());
+
+  // Extend modes SXTX and UXTX require a 64-bit register.
+  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const { return reg_.Is(NoReg); }
+
+
+// True when the operand is behaviourally equivalent to the bare register.
+bool Operand::IsPlainRegister() const {
+  return reg_.IsValid() &&
+         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
+          // No-op shifts.
+          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
+          // No-op extend operations.
+          // We can't include [US]XTW here without knowing more about the
+          // context; they are only no-ops for 32-bit operations.
+          //
+          // For example, this operand could be replaced with w1:
+          //   __ Add(w0, w0, Operand(w1, UXTW));
+          // However, no plain register can replace it in this context:
+          //   __ Add(x0, x0, Operand(w1, UXTW));
+          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
+}
+
+
+bool Operand::IsShiftedRegister() const {
+  return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+  return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+// True for immediate zero or for the zero register (xzr/wzr).
+bool Operand::IsZero() const {
+  if (IsImmediate()) {
+    return GetImmediate() == 0;
+  } else {
+    return GetRegister().IsZero();
+  }
+}
+
+
+// Convert an LSL-shifted register operand into the equivalent
+// extended-register form (UXTX for 64-bit registers, UXTW for 32-bit).
+Operand Operand::ToExtendedRegister() const {
+  VIXL_ASSERT(IsShiftedRegister());
+  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+// MemOperand
+MemOperand::MemOperand()
+    : base_(NoReg),
+      regoffset_(NoReg),
+      offset_(0),
+      addrmode_(Offset),
+      shift_(NO_SHIFT),
+      extend_(NO_EXTEND) {}
+
+
+// Immediate-offset addressing ([base, #off], pre- or post-index).
+MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
+    : base_(base),
+      regoffset_(NoReg),
+      offset_(offset),
+      addrmode_(addrmode),
+      shift_(NO_SHIFT),
+      extend_(NO_EXTEND),
+      shift_amount_(0) {
+  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+// Register-offset addressing with extend ([base, regoffset, {U,S}XTW/SXTX]).
+MemOperand::MemOperand(Register base,
+                       Register regoffset,
+                       Extend extend,
+                       unsigned shift_amount)
+    : base_(base),
+      regoffset_(regoffset),
+      offset_(0),
+      addrmode_(Offset),
+      shift_(NO_SHIFT),
+      extend_(extend),
+      shift_amount_(shift_amount) {
+  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+  VIXL_ASSERT(!regoffset.IsSP());
+  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+  // SXTX extend mode requires a 64-bit offset register.
+  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+// Register-offset addressing with shift ([base, regoffset, LSL #amount]).
+MemOperand::MemOperand(Register base,
+                       Register regoffset,
+                       Shift shift,
+                       unsigned shift_amount)
+    : base_(base),
+      regoffset_(regoffset),
+      offset_(0),
+      addrmode_(Offset),
+      shift_(shift),
+      extend_(NO_EXTEND),
+      shift_amount_(shift_amount) {
+  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+  VIXL_ASSERT(shift == LSL);
+}
+
+
+// Generic form: dispatch on the Operand's kind (immediate, shifted register,
+// or extended register) and replicate the corresponding constructor's checks.
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+    : base_(base),
+      regoffset_(NoReg),
+      addrmode_(addrmode),
+      shift_(NO_SHIFT),
+      extend_(NO_EXTEND),
+      shift_amount_(0) {
+  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+
+  if (offset.IsImmediate()) {
+    offset_ = offset.GetImmediate();
+  } else if (offset.IsShiftedRegister()) {
+    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));
+
+    regoffset_ = offset.GetRegister();
+    shift_ = offset.GetShift();
+    shift_amount_ = offset.GetShiftAmount();
+
+    extend_ = NO_EXTEND;
+    offset_ = 0;
+
+    // These assertions match those in the shifted-register constructor.
+    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+    VIXL_ASSERT(shift_ == LSL);
+  } else {
+    VIXL_ASSERT(offset.IsExtendedRegister());
+    VIXL_ASSERT(addrmode == Offset);
+
+    regoffset_ = offset.GetRegister();
+    extend_ = offset.GetExtend();
+    shift_amount_ = offset.GetShiftAmount();
+
+    shift_ = NO_SHIFT;
+    offset_ = 0;
+
+    // These assertions match those in the extended-register constructor.
+ VIXL_ASSERT(!regoffset_.IsSP()); + VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX)); + VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX))); + } +} + + +bool MemOperand::IsImmediateOffset() const { + return (addrmode_ == Offset) && regoffset_.Is(NoReg); +} + + +bool MemOperand::IsRegisterOffset() const { + return (addrmode_ == Offset) && !regoffset_.Is(NoReg); +} + + +bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; } + + +bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; } + + +void MemOperand::AddOffset(int64_t offset) { + VIXL_ASSERT(IsImmediateOffset()); + offset_ += offset; +} + + +GenericOperand::GenericOperand(const CPURegister& reg) + : cpu_register_(reg), mem_op_size_(0) { + if (reg.IsQ()) { + VIXL_ASSERT(reg.GetSizeInBits() > static_cast(kXRegSize)); + // Support for Q registers is not implemented yet. + VIXL_UNIMPLEMENTED(); + } +} + + +GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size) + : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) { + if (mem_op_size_ > kXRegSizeInBytes) { + // We only support generic operands up to the size of X registers. + VIXL_UNIMPLEMENTED(); + } +} + +bool GenericOperand::Equals(const GenericOperand& other) const { + if (!IsValid() || !other.IsValid()) { + // Two invalid generic operands are considered equal. 
+ return !IsValid() && !other.IsValid(); + } + if (IsCPURegister() && other.IsCPURegister()) { + return GetCPURegister().Is(other.GetCPURegister()); + } else if (IsMemOperand() && other.IsMemOperand()) { + return GetMemOperand().Equals(other.GetMemOperand()) && + (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes()); + } + return false; +} +} +} // namespace vixl::aarch64 diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.h new file mode 100644 index 00000000..e3dbfa3e --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/operands-aarch64.h @@ -0,0 +1,993 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_ +#define VIXL_AARCH64_OPERANDS_AARCH64_H_ + +#include "instructions-aarch64.h" + +namespace vixl { +namespace aarch64 { + +typedef uint64_t RegList; +static const int kRegListSizeInBits = sizeof(RegList) * 8; + + +// Registers. + +// Some CPURegister methods can return Register or VRegister types, so we need +// to declare them in advance. +class Register; +class VRegister; + +class CPURegister { + public: + enum RegisterType { + // The kInvalid value is used to detect uninitialized static instances, + // which are always zero-initialized before any constructors are called. + kInvalid = 0, + kRegister, + kVRegister, + kFPRegister = kVRegister, + kNoRegister + }; + + CPURegister() : code_(0), size_(0), type_(kNoRegister) { + VIXL_ASSERT(!IsValid()); + VIXL_ASSERT(IsNone()); + } + + CPURegister(unsigned code, unsigned size, RegisterType type) + : code_(code), size_(size), type_(type) { + VIXL_ASSERT(IsValidOrNone()); + } + + unsigned GetCode() const { + VIXL_ASSERT(IsValid()); + return code_; + } + VIXL_DEPRECATED("GetCode", unsigned code() const) { return GetCode(); } + + RegisterType GetType() const { + VIXL_ASSERT(IsValidOrNone()); + return type_; + } + VIXL_DEPRECATED("GetType", RegisterType type() const) { return GetType(); } + + RegList GetBit() const { + VIXL_ASSERT(code_ < (sizeof(RegList) * 8)); + return IsValid() ? 
(static_cast(1) << code_) : 0; + } + VIXL_DEPRECATED("GetBit", RegList Bit() const) { return GetBit(); } + + int GetSizeInBytes() const { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(size_ % 8 == 0); + return size_ / 8; + } + VIXL_DEPRECATED("GetSizeInBytes", int SizeInBytes() const) { + return GetSizeInBytes(); + } + + int GetSizeInBits() const { + VIXL_ASSERT(IsValid()); + return size_; + } + VIXL_DEPRECATED("GetSizeInBits", unsigned size() const) { + return GetSizeInBits(); + } + VIXL_DEPRECATED("GetSizeInBits", int SizeInBits() const) { + return GetSizeInBits(); + } + + bool Is8Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 8; + } + + bool Is16Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 16; + } + + bool Is32Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 32; + } + + bool Is64Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 64; + } + + bool Is128Bits() const { + VIXL_ASSERT(IsValid()); + return size_ == 128; + } + + bool IsValid() const { + if (IsValidRegister() || IsValidVRegister()) { + VIXL_ASSERT(!IsNone()); + return true; + } else { + // This assert is hit when the register has not been properly initialized. + // One cause for this can be an initialisation order fiasco. See + // https://isocpp.org/wiki/faq/ctors#static-init-order for some details. + VIXL_ASSERT(IsNone()); + return false; + } + } + + bool IsValidRegister() const { + return IsRegister() && ((size_ == kWRegSize) || (size_ == kXRegSize)) && + ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode)); + } + + bool IsValidVRegister() const { + return IsVRegister() && ((size_ == kBRegSize) || (size_ == kHRegSize) || + (size_ == kSRegSize) || (size_ == kDRegSize) || + (size_ == kQRegSize)) && + (code_ < kNumberOfVRegisters); + } + + bool IsValidFPRegister() const { + return IsFPRegister() && (code_ < kNumberOfVRegisters); + } + + bool IsNone() const { + // kNoRegister types should always have size 0 and code 0. 
+ VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0)); + VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0)); + + return type_ == kNoRegister; + } + + bool Aliases(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return (code_ == other.code_) && (type_ == other.type_); + } + + bool Is(const CPURegister& other) const { + VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone()); + return Aliases(other) && (size_ == other.size_); + } + + bool IsZero() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kZeroRegCode); + } + + bool IsSP() const { + VIXL_ASSERT(IsValid()); + return IsRegister() && (code_ == kSPRegInternalCode); + } + + bool IsRegister() const { return type_ == kRegister; } + + bool IsVRegister() const { return type_ == kVRegister; } + + bool IsFPRegister() const { return IsS() || IsD(); } + + bool IsW() const { return IsValidRegister() && Is32Bits(); } + bool IsX() const { return IsValidRegister() && Is64Bits(); } + + // These assertions ensure that the size and type of the register are as + // described. They do not consider the number of lanes that make up a vector. + // So, for example, Is8B() implies IsD(), and Is1D() implies IsD, but IsD() + // does not imply Is1D() or Is8B(). + // Check the number of lanes, ie. the format of the vector, using methods such + // as Is8B(), Is1D(), etc. in the VRegister class. + bool IsV() const { return IsVRegister(); } + bool IsB() const { return IsV() && Is8Bits(); } + bool IsH() const { return IsV() && Is16Bits(); } + bool IsS() const { return IsV() && Is32Bits(); } + bool IsD() const { return IsV() && Is64Bits(); } + bool IsQ() const { return IsV() && Is128Bits(); } + + // Semantic type for sdot and udot instructions. 
+ bool IsS4B() const { return IsS(); } + const VRegister& S4B() const { return S(); } + + const Register& W() const; + const Register& X() const; + const VRegister& V() const; + const VRegister& B() const; + const VRegister& H() const; + const VRegister& S() const; + const VRegister& D() const; + const VRegister& Q() const; + + bool IsSameType(const CPURegister& other) const { + return type_ == other.type_; + } + + bool IsSameSizeAndType(const CPURegister& other) const { + return (size_ == other.size_) && IsSameType(other); + } + + protected: + unsigned code_; + int size_; + RegisterType type_; + + private: + bool IsValidOrNone() const { return IsValid() || IsNone(); } +}; + + +class Register : public CPURegister { + public: + Register() : CPURegister() {} + explicit Register(const CPURegister& other) + : CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()) { + VIXL_ASSERT(IsValidRegister()); + } + Register(unsigned code, unsigned size) : CPURegister(code, size, kRegister) {} + + bool IsValid() const { + VIXL_ASSERT(IsRegister() || IsNone()); + return IsValidRegister(); + } + + static const Register& GetWRegFromCode(unsigned code); + VIXL_DEPRECATED("GetWRegFromCode", + static const Register& WRegFromCode(unsigned code)) { + return GetWRegFromCode(code); + } + + static const Register& GetXRegFromCode(unsigned code); + VIXL_DEPRECATED("GetXRegFromCode", + static const Register& XRegFromCode(unsigned code)) { + return GetXRegFromCode(code); + } + + private: + static const Register wregisters[]; + static const Register xregisters[]; +}; + + +namespace internal { + +template +class FixedSizeRegister : public Register { + public: + FixedSizeRegister() : Register() {} + explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) { + VIXL_ASSERT(IsValidRegister()); + } + explicit FixedSizeRegister(const Register& other) + : Register(other.GetCode(), size_in_bits) { + VIXL_ASSERT(other.GetSizeInBits() == size_in_bits); + 
VIXL_ASSERT(IsValidRegister()); + } + explicit FixedSizeRegister(const CPURegister& other) + : Register(other.GetCode(), other.GetSizeInBits()) { + VIXL_ASSERT(other.GetType() == kRegister); + VIXL_ASSERT(other.GetSizeInBits() == size_in_bits); + VIXL_ASSERT(IsValidRegister()); + } + + bool IsValid() const { + return Register::IsValid() && (GetSizeInBits() == size_in_bits); + } +}; + +} // namespace internal + +typedef internal::FixedSizeRegister XRegister; +typedef internal::FixedSizeRegister WRegister; + + +class VRegister : public CPURegister { + public: + VRegister() : CPURegister(), lanes_(1) {} + explicit VRegister(const CPURegister& other) + : CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()), + lanes_(1) { + VIXL_ASSERT(IsValidVRegister()); + VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); + } + VRegister(unsigned code, unsigned size, unsigned lanes = 1) + : CPURegister(code, size, kVRegister), lanes_(lanes) { + VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); + } + VRegister(unsigned code, VectorFormat format) + : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister), + lanes_(IsVectorFormat(format) ? 
LaneCountFromFormat(format) : 1) { + VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16)); + } + + bool IsValid() const { + VIXL_ASSERT(IsVRegister() || IsNone()); + return IsValidVRegister(); + } + + static const VRegister& GetBRegFromCode(unsigned code); + VIXL_DEPRECATED("GetBRegFromCode", + static const VRegister& BRegFromCode(unsigned code)) { + return GetBRegFromCode(code); + } + + static const VRegister& GetHRegFromCode(unsigned code); + VIXL_DEPRECATED("GetHRegFromCode", + static const VRegister& HRegFromCode(unsigned code)) { + return GetHRegFromCode(code); + } + + static const VRegister& GetSRegFromCode(unsigned code); + VIXL_DEPRECATED("GetSRegFromCode", + static const VRegister& SRegFromCode(unsigned code)) { + return GetSRegFromCode(code); + } + + static const VRegister& GetDRegFromCode(unsigned code); + VIXL_DEPRECATED("GetDRegFromCode", + static const VRegister& DRegFromCode(unsigned code)) { + return GetDRegFromCode(code); + } + + static const VRegister& GetQRegFromCode(unsigned code); + VIXL_DEPRECATED("GetQRegFromCode", + static const VRegister& QRegFromCode(unsigned code)) { + return GetQRegFromCode(code); + } + + static const VRegister& GetVRegFromCode(unsigned code); + VIXL_DEPRECATED("GetVRegFromCode", + static const VRegister& VRegFromCode(unsigned code)) { + return GetVRegFromCode(code); + } + + VRegister V8B() const { return VRegister(code_, kDRegSize, 8); } + VRegister V16B() const { return VRegister(code_, kQRegSize, 16); } + VRegister V2H() const { return VRegister(code_, kSRegSize, 2); } + VRegister V4H() const { return VRegister(code_, kDRegSize, 4); } + VRegister V8H() const { return VRegister(code_, kQRegSize, 8); } + VRegister V2S() const { return VRegister(code_, kDRegSize, 2); } + VRegister V4S() const { return VRegister(code_, kQRegSize, 4); } + VRegister V2D() const { return VRegister(code_, kQRegSize, 2); } + VRegister V1D() const { return VRegister(code_, kDRegSize, 1); } + + bool Is8B() const { return (Is64Bits() && (lanes_ == 
8)); } + bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); } + bool Is2H() const { return (Is32Bits() && (lanes_ == 2)); } + bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); } + bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); } + bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); } + bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); } + bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); } + bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); } + + // For consistency, we assert the number of lanes of these scalar registers, + // even though there are no vectors of equivalent total size with which they + // could alias. + bool Is1B() const { + VIXL_ASSERT(!(Is8Bits() && IsVector())); + return Is8Bits(); + } + bool Is1H() const { + VIXL_ASSERT(!(Is16Bits() && IsVector())); + return Is16Bits(); + } + bool Is1S() const { + VIXL_ASSERT(!(Is32Bits() && IsVector())); + return Is32Bits(); + } + + // Semantic type for sdot and udot instructions. 
+ bool Is1S4B() const { return Is1S(); } + + + bool IsLaneSizeB() const { return GetLaneSizeInBits() == kBRegSize; } + bool IsLaneSizeH() const { return GetLaneSizeInBits() == kHRegSize; } + bool IsLaneSizeS() const { return GetLaneSizeInBits() == kSRegSize; } + bool IsLaneSizeD() const { return GetLaneSizeInBits() == kDRegSize; } + + int GetLanes() const { return lanes_; } + VIXL_DEPRECATED("GetLanes", int lanes() const) { return GetLanes(); } + + bool IsScalar() const { return lanes_ == 1; } + + bool IsVector() const { return lanes_ > 1; } + + bool IsSameFormat(const VRegister& other) const { + return (size_ == other.size_) && (lanes_ == other.lanes_); + } + + unsigned GetLaneSizeInBytes() const { return GetSizeInBytes() / lanes_; } + VIXL_DEPRECATED("GetLaneSizeInBytes", unsigned LaneSizeInBytes() const) { + return GetLaneSizeInBytes(); + } + + unsigned GetLaneSizeInBits() const { return GetLaneSizeInBytes() * 8; } + VIXL_DEPRECATED("GetLaneSizeInBits", unsigned LaneSizeInBits() const) { + return GetLaneSizeInBits(); + } + + private: + static const VRegister bregisters[]; + static const VRegister hregisters[]; + static const VRegister sregisters[]; + static const VRegister dregisters[]; + static const VRegister qregisters[]; + static const VRegister vregisters[]; + int lanes_; +}; + + +// Backward compatibility for FPRegisters. +typedef VRegister FPRegister; + +// No*Reg is used to indicate an unused argument, or an error case. Note that +// these all compare equal (using the Is() method). The Register and VRegister +// variants are provided for convenience. +const Register NoReg; +const VRegister NoVReg; +const FPRegister NoFPReg; // For backward compatibility. 
+const CPURegister NoCPUReg; + + +#define DEFINE_REGISTERS(N) \ + const WRegister w##N(N); \ + const XRegister x##N(N); +AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS) +#undef DEFINE_REGISTERS +const WRegister wsp(kSPRegInternalCode); +const XRegister sp(kSPRegInternalCode); + + +#define DEFINE_VREGISTERS(N) \ + const VRegister b##N(N, kBRegSize); \ + const VRegister h##N(N, kHRegSize); \ + const VRegister s##N(N, kSRegSize); \ + const VRegister d##N(N, kDRegSize); \ + const VRegister q##N(N, kQRegSize); \ + const VRegister v##N(N, kQRegSize); +AARCH64_REGISTER_CODE_LIST(DEFINE_VREGISTERS) +#undef DEFINE_VREGISTERS + + +// Register aliases. +const XRegister ip0 = x16; +const XRegister ip1 = x17; +const XRegister lr = x30; +const XRegister xzr = x31; +const WRegister wzr = w31; + + +// AreAliased returns true if any of the named registers overlap. Arguments +// set to NoReg are ignored. The system stack pointer may be specified. +bool AreAliased(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + + +// AreSameSizeAndType returns true if all of the specified registers have the +// same size, and are of the same type. The system stack pointer may be +// specified. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoCPUReg). +bool AreSameSizeAndType(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg, + const CPURegister& reg5 = NoCPUReg, + const CPURegister& reg6 = NoCPUReg, + const CPURegister& reg7 = NoCPUReg, + const CPURegister& reg8 = NoCPUReg); + +// AreEven returns true if all of the specified registers have even register +// indices. Arguments set to NoReg are ignored, as are any subsequent +// arguments. 
At least one argument (reg1) must be valid (not NoCPUReg). +bool AreEven(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoReg, + const CPURegister& reg4 = NoReg, + const CPURegister& reg5 = NoReg, + const CPURegister& reg6 = NoReg, + const CPURegister& reg7 = NoReg, + const CPURegister& reg8 = NoReg); + + +// AreConsecutive returns true if all of the specified registers are +// consecutive in the register file. Arguments set to NoReg are ignored, as are +// any subsequent arguments. At least one argument (reg1) must be valid +// (not NoCPUReg). +bool AreConsecutive(const CPURegister& reg1, + const CPURegister& reg2, + const CPURegister& reg3 = NoCPUReg, + const CPURegister& reg4 = NoCPUReg); + + +// AreSameFormat returns true if all of the specified VRegisters have the same +// vector format. Arguments set to NoReg are ignored, as are any subsequent +// arguments. At least one argument (reg1) must be valid (not NoVReg). +bool AreSameFormat(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + +// AreConsecutive returns true if all of the specified VRegisters are +// consecutive in the register file. Arguments set to NoReg are ignored, as are +// any subsequent arguments. At least one argument (reg1) must be valid +// (not NoVReg). +bool AreConsecutive(const VRegister& reg1, + const VRegister& reg2, + const VRegister& reg3 = NoVReg, + const VRegister& reg4 = NoVReg); + + +// Lists of registers. 
+class CPURegList { + public: + explicit CPURegList(CPURegister reg1, + CPURegister reg2 = NoCPUReg, + CPURegister reg3 = NoCPUReg, + CPURegister reg4 = NoCPUReg) + : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()), + size_(reg1.GetSizeInBits()), + type_(reg1.GetType()) { + VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4)); + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, unsigned size, RegList list) + : list_(list), size_(size), type_(type) { + VIXL_ASSERT(IsValid()); + } + + CPURegList(CPURegister::RegisterType type, + unsigned size, + unsigned first_reg, + unsigned last_reg) + : size_(size), type_(type) { + VIXL_ASSERT( + ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) || + ((type == CPURegister::kVRegister) && + (last_reg < kNumberOfVRegisters))); + VIXL_ASSERT(last_reg >= first_reg); + list_ = (UINT64_C(1) << (last_reg + 1)) - 1; + list_ &= ~((UINT64_C(1) << first_reg) - 1); + VIXL_ASSERT(IsValid()); + } + + CPURegister::RegisterType GetType() const { + VIXL_ASSERT(IsValid()); + return type_; + } + VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) { + return GetType(); + } + + // Combine another CPURegList into this one. Registers that already exist in + // this list are left unchanged. The type and size of the registers in the + // 'other' list must match those in this list. + void Combine(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetRegisterSizeInBits() == size_); + list_ |= other.GetList(); + } + + // Remove every register in the other CPURegList from this one. Registers that + // do not exist in this list are ignored. The type and size of the registers + // in the 'other' list must match those in this list. 
+ void Remove(const CPURegList& other) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetRegisterSizeInBits() == size_); + list_ &= ~other.GetList(); + } + + // Variants of Combine and Remove which take a single register. + void Combine(const CPURegister& other) { + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetSizeInBits() == size_); + Combine(other.GetCode()); + } + + void Remove(const CPURegister& other) { + VIXL_ASSERT(other.GetType() == type_); + VIXL_ASSERT(other.GetSizeInBits() == size_); + Remove(other.GetCode()); + } + + // Variants of Combine and Remove which take a single register by its code; + // the type and size of the register is inferred from this list. + void Combine(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ |= (UINT64_C(1) << code); + } + + void Remove(int code) { + VIXL_ASSERT(IsValid()); + VIXL_ASSERT(CPURegister(code, size_, type_).IsValid()); + list_ &= ~(UINT64_C(1) << code); + } + + static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_); + } + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Union(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2) { + VIXL_ASSERT(list_1.type_ == list_2.type_); + VIXL_ASSERT(list_1.size_ == list_2.size_); + return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_); + } + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& list_2, + const CPURegList& list_3); + static CPURegList Intersection(const CPURegList& list_1, + const CPURegList& 
list_2, + const CPURegList& list_3, + const CPURegList& list_4); + + bool Overlaps(const CPURegList& other) const { + return (type_ == other.type_) && ((list_ & other.list_) != 0); + } + + RegList GetList() const { + VIXL_ASSERT(IsValid()); + return list_; + } + VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); } + + void SetList(RegList new_list) { + VIXL_ASSERT(IsValid()); + list_ = new_list; + } + VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) { + return SetList(new_list); + } + + // Remove all callee-saved registers from the list. This can be useful when + // preparing registers for an AAPCS64 function call, for example. + void RemoveCalleeSaved(); + + CPURegister PopLowestIndex(); + CPURegister PopHighestIndex(); + + // AAPCS64 callee-saved registers. + static CPURegList GetCalleeSaved(unsigned size = kXRegSize); + static CPURegList GetCalleeSavedV(unsigned size = kDRegSize); + + // AAPCS64 caller-saved registers. Note that this includes lr. + // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top + // 64-bits being caller-saved. 
+ static CPURegList GetCallerSaved(unsigned size = kXRegSize); + static CPURegList GetCallerSavedV(unsigned size = kDRegSize); + + bool IsEmpty() const { + VIXL_ASSERT(IsValid()); + return list_ == 0; + } + + bool IncludesAliasOf(const CPURegister& other) const { + VIXL_ASSERT(IsValid()); + return (type_ == other.GetType()) && ((other.GetBit() & list_) != 0); + } + + bool IncludesAliasOf(int code) const { + VIXL_ASSERT(IsValid()); + return ((code & list_) != 0); + } + + int GetCount() const { + VIXL_ASSERT(IsValid()); + return CountSetBits(list_); + } + VIXL_DEPRECATED("GetCount", int Count()) const { return GetCount(); } + + int GetRegisterSizeInBits() const { + VIXL_ASSERT(IsValid()); + return size_; + } + VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) { + return GetRegisterSizeInBits(); + } + + int GetRegisterSizeInBytes() const { + int size_in_bits = GetRegisterSizeInBits(); + VIXL_ASSERT((size_in_bits % 8) == 0); + return size_in_bits / 8; + } + VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) { + return GetRegisterSizeInBytes(); + } + + unsigned GetTotalSizeInBytes() const { + VIXL_ASSERT(IsValid()); + return GetRegisterSizeInBytes() * GetCount(); + } + VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) { + return GetTotalSizeInBytes(); + } + + private: + RegList list_; + int size_; + CPURegister::RegisterType type_; + + bool IsValid() const; +}; + + +// AAPCS64 callee-saved registers. +extern const CPURegList kCalleeSaved; +extern const CPURegList kCalleeSavedV; + + +// AAPCS64 caller-saved registers. Note that this includes lr. +extern const CPURegList kCallerSaved; +extern const CPURegList kCallerSavedV; + + +// Operand. +class Operand { + public: + // # + // where is int64_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. 
+ Operand(int64_t immediate = 0); // NOLINT(runtime/explicit) + + // rm, { #} + // where is one of {LSL, LSR, ASR, ROR}. + // is uint6_t. + // This is allowed to be an implicit constructor because Operand is + // a wrapper class that doesn't normally perform any type conversion. + Operand(Register reg, + Shift shift = LSL, + unsigned shift_amount = 0); // NOLINT(runtime/explicit) + + // rm, { {#}} + // where is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}. + // is uint2_t. + explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0); + + bool IsImmediate() const; + bool IsPlainRegister() const; + bool IsShiftedRegister() const; + bool IsExtendedRegister() const; + bool IsZero() const; + + // This returns an LSL shift (<= 4) operand as an equivalent extend operand, + // which helps in the encoding of instructions that use the stack pointer. + Operand ToExtendedRegister() const; + + int64_t GetImmediate() const { + VIXL_ASSERT(IsImmediate()); + return immediate_; + } + VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) { + return GetImmediate(); + } + + int64_t GetEquivalentImmediate() const { + return IsZero() ? 
0 : GetImmediate(); + } + + Register GetRegister() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return reg_; + } + VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); } + Register GetBaseRegister() const { return GetRegister(); } + + Shift GetShift() const { + VIXL_ASSERT(IsShiftedRegister()); + return shift_; + } + VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); } + + Extend GetExtend() const { + VIXL_ASSERT(IsExtendedRegister()); + return extend_; + } + VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); } + + unsigned GetShiftAmount() const { + VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister()); + return shift_amount_; + } + VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) { + return GetShiftAmount(); + } + + private: + int64_t immediate_; + Register reg_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + + +// MemOperand represents the addressing mode of a load or store instruction. +class MemOperand { + public: + // Creates an invalid `MemOperand`. 
+ MemOperand(); + explicit MemOperand(Register base, + int64_t offset = 0, + AddrMode addrmode = Offset); + MemOperand(Register base, + Register regoffset, + Shift shift = LSL, + unsigned shift_amount = 0); + MemOperand(Register base, + Register regoffset, + Extend extend, + unsigned shift_amount = 0); + MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset); + + const Register& GetBaseRegister() const { return base_; } + VIXL_DEPRECATED("GetBaseRegister", const Register& base() const) { + return GetBaseRegister(); + } + + const Register& GetRegisterOffset() const { return regoffset_; } + VIXL_DEPRECATED("GetRegisterOffset", const Register& regoffset() const) { + return GetRegisterOffset(); + } + + int64_t GetOffset() const { return offset_; } + VIXL_DEPRECATED("GetOffset", int64_t offset() const) { return GetOffset(); } + + AddrMode GetAddrMode() const { return addrmode_; } + VIXL_DEPRECATED("GetAddrMode", AddrMode addrmode() const) { + return GetAddrMode(); + } + + Shift GetShift() const { return shift_; } + VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); } + + Extend GetExtend() const { return extend_; } + VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); } + + unsigned GetShiftAmount() const { return shift_amount_; } + VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) { + return GetShiftAmount(); + } + + bool IsImmediateOffset() const; + bool IsRegisterOffset() const; + bool IsPreIndex() const; + bool IsPostIndex() const; + + void AddOffset(int64_t offset); + + bool IsValid() const { + return base_.IsValid() && + ((addrmode_ == Offset) || (addrmode_ == PreIndex) || + (addrmode_ == PostIndex)) && + ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) && + ((offset_ == 0) || !regoffset_.IsValid()); + } + + bool Equals(const MemOperand& other) const { + return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) && + (offset_ == other.offset_) && (addrmode_ == other.addrmode_) && + 
(shift_ == other.shift_) && (extend_ == other.extend_) && + (shift_amount_ == other.shift_amount_); + } + + private: + Register base_; + Register regoffset_; + int64_t offset_; + AddrMode addrmode_; + Shift shift_; + Extend extend_; + unsigned shift_amount_; +}; + +// This an abstraction that can represent a register or memory location. The +// `MacroAssembler` provides helpers to move data between generic operands. +class GenericOperand { + public: + GenericOperand() { VIXL_ASSERT(!IsValid()); } + GenericOperand(const CPURegister& reg); // NOLINT(runtime/explicit) + GenericOperand(const MemOperand& mem_op, + size_t mem_op_size = 0); // NOLINT(runtime/explicit) + + bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); } + + bool Equals(const GenericOperand& other) const; + + bool IsCPURegister() const { + VIXL_ASSERT(IsValid()); + return cpu_register_.IsValid(); + } + + bool IsRegister() const { + return IsCPURegister() && cpu_register_.IsRegister(); + } + + bool IsVRegister() const { + return IsCPURegister() && cpu_register_.IsVRegister(); + } + + bool IsSameCPURegisterType(const GenericOperand& other) { + return IsCPURegister() && other.IsCPURegister() && + GetCPURegister().IsSameType(other.GetCPURegister()); + } + + bool IsMemOperand() const { + VIXL_ASSERT(IsValid()); + return mem_op_.IsValid(); + } + + CPURegister GetCPURegister() const { + VIXL_ASSERT(IsCPURegister()); + return cpu_register_; + } + + MemOperand GetMemOperand() const { + VIXL_ASSERT(IsMemOperand()); + return mem_op_; + } + + size_t GetMemOperandSizeInBytes() const { + VIXL_ASSERT(IsMemOperand()); + return mem_op_size_; + } + + size_t GetSizeInBytes() const { + return IsCPURegister() ? cpu_register_.GetSizeInBytes() + : GetMemOperandSizeInBytes(); + } + + size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; } + + private: + CPURegister cpu_register_; + MemOperand mem_op_; + // The size of the memory region pointed to, in bytes. 
+ // We only support sizes up to X/D register sizes. + size_t mem_op_size_; +}; +} +} // namespace vixl::aarch64 + +#endif // VIXL_AARCH64_OPERANDS_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/pointer-auth-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/pointer-auth-aarch64.cc new file mode 100644 index 00000000..55cf4ca5 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/pointer-auth-aarch64.cc @@ -0,0 +1,197 @@ +// Copyright 2018, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 + +#include "simulator-aarch64.h" + +#include "utils-vixl.h" + +namespace vixl { +namespace aarch64 { + +// Randomly generated example keys for simulating only. +const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71, + 0xab9fd4e14b2fec51, + 0}; +const Simulator::PACKey Simulator::kPACKeyIB = {0xeebb163b474e04c8, + 0x5267ac6fc280fb7c, + 1}; +const Simulator::PACKey Simulator::kPACKeyDA = {0x5caef808deb8b1e2, + 0xd347cbc06b7b0f77, + 0}; +const Simulator::PACKey Simulator::kPACKeyDB = {0xe06aa1a949ba8cc7, + 0xcfde69e3db6d0432, + 1}; + +// The general PAC key isn't intended to be used with AuthPAC so we ensure the +// key number is invalid and asserts if used incorrectly. +const Simulator::PACKey Simulator::kPACKeyGA = {0xfcd98a44d564b3d5, + 0x6c56df1904bf0ddc, + -1}; + +static uint64_t GetNibble(uint64_t in_data, int position) { + return (in_data >> position) & 0xf; +} + +static uint64_t ShuffleNibbles(uint64_t in_data) { + static int in_positions[16] = + {4, 36, 52, 40, 44, 0, 24, 12, 56, 60, 8, 32, 16, 28, 20, 48}; + uint64_t out_data = 0; + for (int i = 0; i < 16; i++) { + out_data |= GetNibble(in_data, in_positions[i]) << (4 * i); + } + return out_data; +} + +static uint64_t SubstituteNibbles(uint64_t in_data) { + // Randomly chosen substitutes. 
+ static uint64_t subs[16] = + {4, 7, 3, 9, 10, 14, 0, 1, 15, 2, 8, 6, 12, 5, 11, 13}; + uint64_t out_data = 0; + for (int i = 0; i < 16; i++) { + int index = (in_data >> (4 * i)) & 0xf; + out_data |= subs[index] << (4 * i); + } + return out_data; +} + +// Rotate nibble to the left by the amount specified. +static uint64_t RotNibble(uint64_t in_cell, int amount) { + VIXL_ASSERT((amount >= 0) && (amount <= 3)); + + in_cell &= 0xf; + uint64_t temp = (in_cell << 4) | in_cell; + return (temp >> (4 - amount)) & 0xf; +} + +static uint64_t BigShuffle(uint64_t in_data) { + uint64_t out_data = 0; + for (int i = 0; i < 4; i++) { + uint64_t n12 = GetNibble(in_data, 4 * (i + 12)); + uint64_t n8 = GetNibble(in_data, 4 * (i + 8)); + uint64_t n4 = GetNibble(in_data, 4 * (i + 4)); + uint64_t n0 = GetNibble(in_data, 4 * (i + 0)); + + uint64_t t0 = RotNibble(n8, 2) ^ RotNibble(n4, 1) ^ RotNibble(n0, 1); + uint64_t t1 = RotNibble(n12, 1) ^ RotNibble(n4, 2) ^ RotNibble(n0, 1); + uint64_t t2 = RotNibble(n12, 2) ^ RotNibble(n8, 1) ^ RotNibble(n0, 1); + uint64_t t3 = RotNibble(n12, 1) ^ RotNibble(n8, 1) ^ RotNibble(n4, 2); + + out_data |= t3 << (4 * (i + 0)); + out_data |= t2 << (4 * (i + 4)); + out_data |= t1 << (4 * (i + 8)); + out_data |= t0 << (4 * (i + 12)); + } + return out_data; +} + +// A simple, non-standard hash function invented for simulating. It mixes +// reasonably well, however it is unlikely to be cryptographically secure and +// may have a higher collision chance than other hashing algorithms. 
+uint64_t Simulator::ComputePAC(uint64_t data, uint64_t context, PACKey key) { + uint64_t working_value = data ^ key.high; + working_value = BigShuffle(working_value); + working_value = ShuffleNibbles(working_value); + working_value ^= key.low; + working_value = ShuffleNibbles(working_value); + working_value = BigShuffle(working_value); + working_value ^= context; + working_value = SubstituteNibbles(working_value); + working_value = BigShuffle(working_value); + working_value = SubstituteNibbles(working_value); + + return working_value; +} + +// The TTBR is selected by bit 63 or 55 depending on TBI for pointers without +// codes, but is always 55 once a PAC code is added to a pointer. For this +// reason, it must be calculated at the call site. +uint64_t Simulator::CalculatePACMask(uint64_t ptr, PointerType type, int ttbr) { + int bottom_pac_bit = GetBottomPACBit(ptr, ttbr); + int top_pac_bit = GetTopPACBit(ptr, type); + return ExtractUnsignedBitfield64(top_pac_bit, + bottom_pac_bit, + 0xffffffffffffffff & ~kTTBRMask) + << bottom_pac_bit; +} + +uint64_t Simulator::AuthPAC(uint64_t ptr, + uint64_t context, + PACKey key, + PointerType type) { + VIXL_ASSERT((key.number == 0) || (key.number == 1)); + + uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1); + uint64_t original_ptr = + ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask); + + uint64_t pac = ComputePAC(original_ptr, context, key); + + uint64_t error_code = 1 << key.number; + if ((pac & pac_mask) == (ptr & pac_mask)) { + return original_ptr; + } else { + int error_lsb = GetTopPACBit(ptr, type) - 2; + uint64_t error_mask = UINT64_C(0x3) << error_lsb; + return (original_ptr & ~error_mask) | (error_code << error_lsb); + } +} + +uint64_t Simulator::AddPAC(uint64_t ptr, + uint64_t context, + PACKey key, + PointerType type) { + int top_pac_bit = GetTopPACBit(ptr, type); + + // TODO: Properly handle the case where extension bits are bad and TBI is + // turned off, and also test me. 
+ VIXL_ASSERT(HasTBI(ptr, type)); + int ttbr = (ptr >> 55) & 1; + uint64_t pac_mask = CalculatePACMask(ptr, type, ttbr); + uint64_t ext_ptr = (ttbr == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask); + + uint64_t pac = ComputePAC(ext_ptr, context, key); + + // If the pointer isn't all zeroes or all ones in the PAC bitfield, corrupt + // the resulting code. + if (((ptr & (pac_mask | kTTBRMask)) != 0x0) && + ((~ptr & (pac_mask | kTTBRMask)) != 0x0)) { + pac ^= UINT64_C(1) << (top_pac_bit - 1); + } + + uint64_t ttbr_shifted = static_cast(ttbr) << 55; + return (pac & pac_mask) | ttbr_shifted | (ptr & ~pac_mask); +} + +uint64_t Simulator::StripPAC(uint64_t ptr, PointerType type) { + uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1); + return ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask); +} +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.cc b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.cc new file mode 100644 index 00000000..b682d07c --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.cc @@ -0,0 +1,6940 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 + +#include +#include +#include + +#include "simulator-aarch64.h" + +namespace vixl { +namespace aarch64 { + +using vixl::internal::SimFloat16; + +const Instruction* Simulator::kEndOfSimAddress = NULL; + +void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) { + int width = msb - lsb + 1; + VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits)); + + bits <<= lsb; + uint32_t mask = ((1 << width) - 1) << lsb; + VIXL_ASSERT((mask & write_ignore_mask_) == 0); + + value_ = (value_ & ~mask) | (bits & mask); +} + + +SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) { + switch (id) { + case NZCV: + return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask); + case FPCR: + return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask); + default: + VIXL_UNREACHABLE(); + return SimSystemRegister(); + } +} + + +Simulator::Simulator(Decoder* decoder, FILE* stream) + : cpu_features_auditor_(decoder, CPUFeatures::All()) { + // Ensure that shift operations act as the simulator expects. 
+ VIXL_ASSERT((static_cast(-1) >> 1) == -1); + VIXL_ASSERT((static_cast(-1) >> 1) == 0x7fffffff); + + instruction_stats_ = false; + + // Set up the decoder. + decoder_ = decoder; + decoder_->AppendVisitor(this); + + stream_ = stream; + + print_disasm_ = new PrintDisassembler(stream_); + // The Simulator and Disassembler share the same available list, held by the + // auditor. The Disassembler only annotates instructions with features that + // are _not_ available, so registering the auditor should have no effect + // unless the simulator is about to abort (due to missing features). In + // practice, this means that with trace enabled, the simulator will crash just + // after the disassembler prints the instruction, with the missing features + // enumerated. + print_disasm_->RegisterCPUFeaturesAuditor(&cpu_features_auditor_); + + SetColouredTrace(false); + trace_parameters_ = LOG_NONE; + + ResetState(); + + // Allocate and set up the simulator stack. + stack_ = new byte[stack_size_]; + stack_limit_ = stack_ + stack_protection_size_; + // Configure the starting stack pointer. + // - Find the top of the stack. + byte* tos = stack_ + stack_size_; + // - There's a protection region at both ends of the stack. + tos -= stack_protection_size_; + // - The stack pointer must be 16-byte aligned. + tos = AlignDown(tos, 16); + WriteSp(tos); + + instrumentation_ = NULL; + + // Print a warning about exclusive-access instructions, but only the first + // time they are encountered. This warning can be silenced using + // SilenceExclusiveAccessWarning(). + print_exclusive_access_warning_ = true; + + guard_pages_ = false; +} + + +void Simulator::ResetState() { + // Reset the system registers. + nzcv_ = SimSystemRegister::DefaultValueFor(NZCV); + fpcr_ = SimSystemRegister::DefaultValueFor(FPCR); + + // Reset registers to 0. 
+ pc_ = NULL; + pc_modified_ = false; + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + WriteXRegister(i, 0xbadbeef); + } + // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP. + uint64_t nan_bits[] = { + UINT64_C(0x7ff00cab7f8ba9e1), UINT64_C(0x7ff0dead7f8beef1), + }; + VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits[0] & kDRegMask))); + VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits[0] & kSRegMask))); + + qreg_t q_bits; + VIXL_ASSERT(sizeof(q_bits) == sizeof(nan_bits)); + memcpy(&q_bits, nan_bits, sizeof(nan_bits)); + + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + WriteQRegister(i, q_bits); + } + // Returning to address 0 exits the Simulator. + WriteLr(kEndOfSimAddress); + + btype_ = DefaultBType; + next_btype_ = DefaultBType; +} + + +Simulator::~Simulator() { + delete[] stack_; + // The decoder may outlive the simulator. + decoder_->RemoveVisitor(print_disasm_); + delete print_disasm_; + + decoder_->RemoveVisitor(instrumentation_); + delete instrumentation_; +} + + +void Simulator::Run() { + // Flush any written registers before executing anything, so that + // manually-set registers are logged _before_ the first instruction. 
+ LogAllWrittenRegisters(); + + while (pc_ != kEndOfSimAddress) { + ExecuteInstruction(); + } +} + + +void Simulator::RunFrom(const Instruction* first) { + WritePc(first, NoBranchLog); + Run(); +} + + +const char* Simulator::xreg_names[] = {"x0", "x1", "x2", "x3", "x4", "x5", + "x6", "x7", "x8", "x9", "x10", "x11", + "x12", "x13", "x14", "x15", "x16", "x17", + "x18", "x19", "x20", "x21", "x22", "x23", + "x24", "x25", "x26", "x27", "x28", "x29", + "lr", "xzr", "sp"}; + +const char* Simulator::wreg_names[] = {"w0", "w1", "w2", "w3", "w4", "w5", + "w6", "w7", "w8", "w9", "w10", "w11", + "w12", "w13", "w14", "w15", "w16", "w17", + "w18", "w19", "w20", "w21", "w22", "w23", + "w24", "w25", "w26", "w27", "w28", "w29", + "w30", "wzr", "wsp"}; + +const char* Simulator::hreg_names[] = {"h0", "h1", "h2", "h3", "h4", "h5", + "h6", "h7", "h8", "h9", "h10", "h11", + "h12", "h13", "h14", "h15", "h16", "h17", + "h18", "h19", "h20", "h21", "h22", "h23", + "h24", "h25", "h26", "h27", "h28", "h29", + "h30", "h31"}; + +const char* Simulator::sreg_names[] = {"s0", "s1", "s2", "s3", "s4", "s5", + "s6", "s7", "s8", "s9", "s10", "s11", + "s12", "s13", "s14", "s15", "s16", "s17", + "s18", "s19", "s20", "s21", "s22", "s23", + "s24", "s25", "s26", "s27", "s28", "s29", + "s30", "s31"}; + +const char* Simulator::dreg_names[] = {"d0", "d1", "d2", "d3", "d4", "d5", + "d6", "d7", "d8", "d9", "d10", "d11", + "d12", "d13", "d14", "d15", "d16", "d17", + "d18", "d19", "d20", "d21", "d22", "d23", + "d24", "d25", "d26", "d27", "d28", "d29", + "d30", "d31"}; + +const char* Simulator::vreg_names[] = {"v0", "v1", "v2", "v3", "v4", "v5", + "v6", "v7", "v8", "v9", "v10", "v11", + "v12", "v13", "v14", "v15", "v16", "v17", + "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", + "v30", "v31"}; + + +const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { + VIXL_ASSERT(code < kNumberOfRegisters); + // If the code represents the stack pointer, index the name after 
zr. + if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { + code = kZeroRegCode + 1; + } + return wreg_names[code]; +} + + +const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { + VIXL_ASSERT(code < kNumberOfRegisters); + // If the code represents the stack pointer, index the name after zr. + if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { + code = kZeroRegCode + 1; + } + return xreg_names[code]; +} + + +const char* Simulator::HRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return hreg_names[code]; +} + + +const char* Simulator::SRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return sreg_names[code]; +} + + +const char* Simulator::DRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfFPRegisters); + return dreg_names[code]; +} + + +const char* Simulator::VRegNameForCode(unsigned code) { + VIXL_ASSERT(code < kNumberOfVRegisters); + return vreg_names[code]; +} + + +#define COLOUR(colour_code) "\033[0;" colour_code "m" +#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m" +#define COLOUR_HIGHLIGHT "\033[43m" +#define NORMAL "" +#define GREY "30" +#define RED "31" +#define GREEN "32" +#define YELLOW "33" +#define BLUE "34" +#define MAGENTA "35" +#define CYAN "36" +#define WHITE "37" +void Simulator::SetColouredTrace(bool value) { + coloured_trace_ = value; + + clr_normal = value ? COLOUR(NORMAL) : ""; + clr_flag_name = value ? COLOUR_BOLD(WHITE) : ""; + clr_flag_value = value ? COLOUR(NORMAL) : ""; + clr_reg_name = value ? COLOUR_BOLD(CYAN) : ""; + clr_reg_value = value ? COLOUR(CYAN) : ""; + clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : ""; + clr_vreg_value = value ? COLOUR(MAGENTA) : ""; + clr_memory_address = value ? COLOUR_BOLD(BLUE) : ""; + clr_warning = value ? COLOUR_BOLD(YELLOW) : ""; + clr_warning_message = value ? COLOUR(YELLOW) : ""; + clr_printf = value ? COLOUR(GREEN) : ""; + clr_branch_marker = value ? 
COLOUR(GREY) COLOUR_HIGHLIGHT : ""; + + if (value) { + print_disasm_->SetCPUFeaturesPrefix("// Needs: " COLOUR_BOLD(RED)); + print_disasm_->SetCPUFeaturesSuffix(COLOUR(NORMAL)); + } else { + print_disasm_->SetCPUFeaturesPrefix("// Needs: "); + print_disasm_->SetCPUFeaturesSuffix(""); + } +} + + +void Simulator::SetTraceParameters(int parameters) { + bool disasm_before = trace_parameters_ & LOG_DISASM; + trace_parameters_ = parameters; + bool disasm_after = trace_parameters_ & LOG_DISASM; + + if (disasm_before != disasm_after) { + if (disasm_after) { + decoder_->InsertVisitorBefore(print_disasm_, this); + } else { + decoder_->RemoveVisitor(print_disasm_); + } + } +} + + +void Simulator::SetInstructionStats(bool value) { + if (value != instruction_stats_) { + if (value) { + if (instrumentation_ == NULL) { + // Set the sample period to 10, as the VIXL examples and tests are + // short. + instrumentation_ = new Instrument("vixl_stats.csv", 10); + } + decoder_->AppendVisitor(instrumentation_); + } else if (instrumentation_ != NULL) { + decoder_->RemoveVisitor(instrumentation_); + } + instruction_stats_ = value; + } +} + +// Helpers --------------------------------------------------------------------- +uint64_t Simulator::AddWithCarry(unsigned reg_size, + bool set_flags, + uint64_t left, + uint64_t right, + int carry_in) { + VIXL_ASSERT((carry_in == 0) || (carry_in == 1)); + VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); + + uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt; + uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask; + uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask; + + left &= reg_mask; + right &= reg_mask; + uint64_t result = (left + right + carry_in) & reg_mask; + + if (set_flags) { + ReadNzcv().SetN(CalcNFlag(result, reg_size)); + ReadNzcv().SetZ(CalcZFlag(result)); + + // Compute the C flag by comparing the result to the max unsigned integer. 
+ uint64_t max_uint_2op = max_uint - carry_in; + bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right); + ReadNzcv().SetC(C ? 1 : 0); + + // Overflow iff the sign bit is the same for the two inputs and different + // for the result. + uint64_t left_sign = left & sign_mask; + uint64_t right_sign = right & sign_mask; + uint64_t result_sign = result & sign_mask; + bool V = (left_sign == right_sign) && (left_sign != result_sign); + ReadNzcv().SetV(V ? 1 : 0); + + LogSystemRegister(NZCV); + } + return result; +} + + +int64_t Simulator::ShiftOperand(unsigned reg_size, + int64_t value, + Shift shift_type, + unsigned amount) const { + VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize)); + if (amount == 0) { + return value; + } + uint64_t uvalue = static_cast(value); + uint64_t mask = kWRegMask; + bool is_negative = (uvalue & kWSignMask) != 0; + if (reg_size == kXRegSize) { + mask = kXRegMask; + is_negative = (uvalue & kXSignMask) != 0; + } + + switch (shift_type) { + case LSL: + uvalue <<= amount; + break; + case LSR: + uvalue >>= amount; + break; + case ASR: + uvalue >>= amount; + if (is_negative) { + // Simulate sign-extension to 64 bits. 
+ uvalue |= ~UINT64_C(0) << (reg_size - amount); + } + break; + case ROR: { + uvalue = RotateRight(uvalue, amount, reg_size); + break; + } + default: + VIXL_UNIMPLEMENTED(); + return 0; + } + uvalue &= mask; + + int64_t result; + memcpy(&result, &uvalue, sizeof(result)); + return result; +} + + +int64_t Simulator::ExtendValue(unsigned reg_size, + int64_t value, + Extend extend_type, + unsigned left_shift) const { + switch (extend_type) { + case UXTB: + value &= kByteMask; + break; + case UXTH: + value &= kHalfWordMask; + break; + case UXTW: + value &= kWordMask; + break; + case SXTB: + value &= kByteMask; + if ((value & 0x80) != 0) { + value |= ~UINT64_C(0) << 8; + } + break; + case SXTH: + value &= kHalfWordMask; + if ((value & 0x8000) != 0) { + value |= ~UINT64_C(0) << 16; + } + break; + case SXTW: + value &= kWordMask; + if ((value & 0x80000000) != 0) { + value |= ~UINT64_C(0) << 32; + } + break; + case UXTX: + case SXTX: + break; + default: + VIXL_UNREACHABLE(); + } + return ShiftOperand(reg_size, value, LSL, left_shift); +} + + +void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) { + AssertSupportedFPCR(); + + // TODO: This assumes that the C++ implementation handles comparisons in the + // way that we expect (as per AssertSupportedFPCR()). 
+ bool process_exception = false; + if ((IsNaN(val0) != 0) || (IsNaN(val1) != 0)) { + ReadNzcv().SetRawValue(FPUnorderedFlag); + if (IsSignallingNaN(val0) || IsSignallingNaN(val1) || + (trap == EnableTrap)) { + process_exception = true; + } + } else if (val0 < val1) { + ReadNzcv().SetRawValue(FPLessThanFlag); + } else if (val0 > val1) { + ReadNzcv().SetRawValue(FPGreaterThanFlag); + } else if (val0 == val1) { + ReadNzcv().SetRawValue(FPEqualFlag); + } else { + VIXL_UNREACHABLE(); + } + LogSystemRegister(NZCV); + if (process_exception) FPProcessException(); +} + + +uint64_t Simulator::ComputeMemOperandAddress(const MemOperand& mem_op) const { + VIXL_ASSERT(mem_op.IsValid()); + int64_t base = ReadRegister(mem_op.GetBaseRegister()); + if (mem_op.IsImmediateOffset()) { + return base + mem_op.GetOffset(); + } else { + VIXL_ASSERT(mem_op.GetRegisterOffset().IsValid()); + int64_t offset = ReadRegister(mem_op.GetRegisterOffset()); + unsigned shift_amount = mem_op.GetShiftAmount(); + if (mem_op.GetShift() != NO_SHIFT) { + offset = ShiftOperand(kXRegSize, offset, mem_op.GetShift(), shift_amount); + } + if (mem_op.GetExtend() != NO_EXTEND) { + offset = ExtendValue(kXRegSize, offset, mem_op.GetExtend(), shift_amount); + } + return static_cast(base + offset); + } +} + + +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize( + unsigned reg_size, unsigned lane_size) { + VIXL_ASSERT(reg_size >= lane_size); + + uint32_t format = 0; + if (reg_size != lane_size) { + switch (reg_size) { + default: + VIXL_UNREACHABLE(); + break; + case kQRegSizeInBytes: + format = kPrintRegAsQVector; + break; + case kDRegSizeInBytes: + format = kPrintRegAsDVector; + break; + } + } + + switch (lane_size) { + default: + VIXL_UNREACHABLE(); + break; + case kQRegSizeInBytes: + format |= kPrintReg1Q; + break; + case kDRegSizeInBytes: + format |= kPrintReg1D; + break; + case kSRegSizeInBytes: + format |= kPrintReg1S; + break; + case kHRegSizeInBytes: + format |= kPrintReg1H; + break; + 
case kBRegSizeInBytes: + format |= kPrintReg1B; + break; + } + // These sizes would be duplicate case labels. + VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes); + VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes); + VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D); + VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S); + + return static_cast(format); +} + + +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat( + VectorFormat vform) { + switch (vform) { + default: + VIXL_UNREACHABLE(); + return kPrintReg16B; + case kFormat16B: + return kPrintReg16B; + case kFormat8B: + return kPrintReg8B; + case kFormat8H: + return kPrintReg8H; + case kFormat4H: + return kPrintReg4H; + case kFormat4S: + return kPrintReg4S; + case kFormat2S: + return kPrintReg2S; + case kFormat2D: + return kPrintReg2D; + case kFormat1D: + return kPrintReg1D; + + case kFormatB: + return kPrintReg1B; + case kFormatH: + return kPrintReg1H; + case kFormatS: + return kPrintReg1S; + case kFormatD: + return kPrintReg1D; + } +} + + +Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP( + VectorFormat vform) { + switch (vform) { + default: + VIXL_UNREACHABLE(); + return kPrintReg16B; + case kFormat8H: + return kPrintReg8HFP; + case kFormat4H: + return kPrintReg4HFP; + case kFormat4S: + return kPrintReg4SFP; + case kFormat2S: + return kPrintReg2SFP; + case kFormat2D: + return kPrintReg2DFP; + case kFormat1D: + return kPrintReg1DFP; + case kFormatH: + return kPrintReg1HFP; + case kFormatS: + return kPrintReg1SFP; + case kFormatD: + return kPrintReg1DFP; + } +} + + +void Simulator::PrintWrittenRegisters() { + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + if (registers_[i].WrittenSinceLastLog()) PrintRegister(i); + } +} + + +void Simulator::PrintWrittenVRegisters() { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + // At this point there is no type information, so print as a raw 1Q. 
+ if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q); + } +} + + +void Simulator::PrintSystemRegisters() { + PrintSystemRegister(NZCV); + PrintSystemRegister(FPCR); +} + + +void Simulator::PrintRegisters() { + for (unsigned i = 0; i < kNumberOfRegisters; i++) { + PrintRegister(i); + } +} + + +void Simulator::PrintVRegisters() { + for (unsigned i = 0; i < kNumberOfVRegisters; i++) { + // At this point there is no type information, so print as a raw 1Q. + PrintVRegister(i, kPrintReg1Q); + } +} + + +// Print a register's name and raw value. +// +// Only the least-significant `size_in_bytes` bytes of the register are printed, +// but the value is aligned as if the whole register had been printed. +// +// For typical register updates, size_in_bytes should be set to kXRegSizeInBytes +// -- the default -- so that the whole register is printed. Other values of +// size_in_bytes are intended for use when the register hasn't actually been +// updated (such as in PrintWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a memory access annotation). +void Simulator::PrintRegisterRawHelper(unsigned code, + Reg31Mode r31mode, + int size_in_bytes) { + // The template for all supported sizes. 
+ // "# x{code}: 0xffeeddccbbaa9988" + // "# w{code}: 0xbbaa9988" + // "# w{code}<15:0>: 0x9988" + // "# w{code}<7:0>: 0x88" + unsigned padding_chars = (kXRegSizeInBytes - size_in_bytes) * 2; + + const char* name = ""; + const char* suffix = ""; + switch (size_in_bytes) { + case kXRegSizeInBytes: + name = XRegNameForCode(code, r31mode); + break; + case kWRegSizeInBytes: + name = WRegNameForCode(code, r31mode); + break; + case 2: + name = WRegNameForCode(code, r31mode); + suffix = "<15:0>"; + padding_chars -= strlen(suffix); + break; + case 1: + name = WRegNameForCode(code, r31mode); + suffix = "<7:0>"; + padding_chars -= strlen(suffix); + break; + default: + VIXL_UNREACHABLE(); + } + fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix); + + // Print leading padding spaces. + VIXL_ASSERT(padding_chars < (kXRegSizeInBytes * 2)); + for (unsigned i = 0; i < padding_chars; i++) { + putc(' ', stream_); + } + + // Print the specified bits in hexadecimal format. + uint64_t bits = ReadRegister(code, r31mode); + bits &= kXRegMask >> ((kXRegSizeInBytes - size_in_bytes) * 8); + VIXL_STATIC_ASSERT(sizeof(bits) == kXRegSizeInBytes); + + int chars = size_in_bytes * 2; + fprintf(stream_, + "%s0x%0*" PRIx64 "%s", + clr_reg_value, + chars, + bits, + clr_normal); +} + + +void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) { + registers_[code].NotifyRegisterLogged(); + + // Don't print writes into xzr. + if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) { + return; + } + + // The template for all x and w registers: + // "# x{code}: 0x{value}" + // "# w{code}: 0x{value}" + + PrintRegisterRawHelper(code, r31mode); + fprintf(stream_, "\n"); +} + + +// Print a register's name and raw value. +// +// The `bytes` and `lsb` arguments can be used to limit the bytes that are +// printed. These arguments are intended for use in cases where register hasn't +// actually been updated (such as in PrintVWrite). +// +// No newline is printed. 
This allows the caller to print more details (such as +// a floating-point interpretation or a memory access annotation). +void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) { + // The template for vector types: + // "# v{code}: 0xffeeddccbbaa99887766554433221100". + // An example with bytes=4 and lsb=8: + // "# v{code}: 0xbbaa9988 ". + fprintf(stream_, + "# %s%5s: %s", + clr_vreg_name, + VRegNameForCode(code), + clr_vreg_value); + + int msb = lsb + bytes - 1; + int byte = kQRegSizeInBytes - 1; + + // Print leading padding spaces. (Two spaces per byte.) + while (byte > msb) { + fprintf(stream_, " "); + byte--; + } + + // Print the specified part of the value, byte by byte. + qreg_t rawbits = ReadQRegister(code); + fprintf(stream_, "0x"); + while (byte >= lsb) { + fprintf(stream_, "%02x", rawbits.val[byte]); + byte--; + } + + // Print trailing padding spaces. + while (byte >= 0) { + fprintf(stream_, " "); + byte--; + } + fprintf(stream_, "%s", clr_normal); +} + + +// Print each of the specified lanes of a register as a float or double value. +// +// The `lane_count` and `lslane` arguments can be used to limit the lanes that +// are printed. These arguments are intended for use in cases where register +// hasn't actually been updated (such as in PrintVWrite). +// +// No newline is printed. This allows the caller to print more details (such as +// a memory access annotation). 
+void Simulator::PrintVRegisterFPHelper(unsigned code, + unsigned lane_size_in_bytes, + int lane_count, + int rightmost_lane) { + VIXL_ASSERT((lane_size_in_bytes == kHRegSizeInBytes) || + (lane_size_in_bytes == kSRegSizeInBytes) || + (lane_size_in_bytes == kDRegSizeInBytes)); + + unsigned msb = ((lane_count + rightmost_lane) * lane_size_in_bytes); + VIXL_ASSERT(msb <= kQRegSizeInBytes); + + // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register + // name is used: + // " (h{code}: {value})" + // " (s{code}: {value})" + // " (d{code}: {value})" + // For vector types, "..." is used to represent one or more omitted lanes. + // " (..., {value}, {value}, ...)" + if (lane_size_in_bytes == kHRegSizeInBytes) { + // TODO: Trace tests will fail until we regenerate them. + return; + } + if ((lane_count == 1) && (rightmost_lane == 0)) { + const char* name; + switch (lane_size_in_bytes) { + case kHRegSizeInBytes: + name = HRegNameForCode(code); + break; + case kSRegSizeInBytes: + name = SRegNameForCode(code); + break; + case kDRegSizeInBytes: + name = DRegNameForCode(code); + break; + default: + name = NULL; + VIXL_UNREACHABLE(); + } + fprintf(stream_, " (%s%s: ", clr_vreg_name, name); + } else { + if (msb < (kQRegSizeInBytes - 1)) { + fprintf(stream_, " (..., "); + } else { + fprintf(stream_, " ("); + } + } + + // Print the list of values. + const char* separator = ""; + int leftmost_lane = rightmost_lane + lane_count - 1; + for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) { + double value; + switch (lane_size_in_bytes) { + case kHRegSizeInBytes: + value = ReadVRegister(code).GetLane(lane); + break; + case kSRegSizeInBytes: + value = ReadVRegister(code).GetLane(lane); + break; + case kDRegSizeInBytes: + value = ReadVRegister(code).GetLane(lane); + break; + default: + value = 0.0; + VIXL_UNREACHABLE(); + } + if (IsNaN(value)) { + // The output for NaNs is implementation defined. 
Always print `nan`, so + // that traces are coherent across different implementations. + fprintf(stream_, "%s%snan%s", separator, clr_vreg_value, clr_normal); + } else { + fprintf(stream_, + "%s%s%#g%s", + separator, + clr_vreg_value, + value, + clr_normal); + } + separator = ", "; + } + + if (rightmost_lane > 0) { + fprintf(stream_, ", ..."); + } + fprintf(stream_, ")"); +} + + +void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) { + vregisters_[code].NotifyRegisterLogged(); + + int lane_size_log2 = format & kPrintRegLaneSizeMask; + + int reg_size_log2; + if (format & kPrintRegAsQVector) { + reg_size_log2 = kQRegSizeInBytesLog2; + } else if (format & kPrintRegAsDVector) { + reg_size_log2 = kDRegSizeInBytesLog2; + } else { + // Scalar types. + reg_size_log2 = lane_size_log2; + } + + int lane_count = 1 << (reg_size_log2 - lane_size_log2); + int lane_size = 1 << lane_size_log2; + + // The template for vector types: + // "# v{code}: 0x{rawbits} (..., {value}, ...)". + // The template for scalar types: + // "# v{code}: 0x{rawbits} ({reg}:{value})". + // The values in parentheses after the bit representations are floating-point + // interpretations. They are displayed only if the kPrintVRegAsFP bit is set. 
+ + PrintVRegisterRawHelper(code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(code, lane_size, lane_count); + } + + fprintf(stream_, "\n"); +} + + +void Simulator::PrintSystemRegister(SystemRegister id) { + switch (id) { + case NZCV: + fprintf(stream_, + "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n", + clr_flag_name, + clr_flag_value, + ReadNzcv().GetN(), + ReadNzcv().GetZ(), + ReadNzcv().GetC(), + ReadNzcv().GetV(), + clr_normal); + break; + case FPCR: { + static const char* rmode[] = {"0b00 (Round to Nearest)", + "0b01 (Round towards Plus Infinity)", + "0b10 (Round towards Minus Infinity)", + "0b11 (Round towards Zero)"}; + VIXL_ASSERT(ReadFpcr().GetRMode() < ArrayLength(rmode)); + fprintf(stream_, + "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", + clr_flag_name, + clr_flag_value, + ReadFpcr().GetAHP(), + ReadFpcr().GetDN(), + ReadFpcr().GetFZ(), + rmode[ReadFpcr().GetRMode()], + clr_normal); + break; + } + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::PrintRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + registers_[reg_code].NotifyRegisterLogged(); + + USE(format); + + // The template is "# {reg}: 0x{value} <- {address}". + PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister); + fprintf(stream_, + " <- %s0x%016" PRIxPTR "%s\n", + clr_memory_address, + address, + clr_normal); +} + + +void Simulator::PrintVRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane) { + vregisters_[reg_code].NotifyRegisterLogged(); + + // The template is "# v{code}: 0x{rawbits} <- address". 
+ PrintVRegisterRawHelper(reg_code); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(reg_code, + GetPrintRegLaneSizeInBytes(format), + GetPrintRegLaneCount(format), + lane); + } + fprintf(stream_, + " <- %s0x%016" PRIxPTR "%s\n", + clr_memory_address, + address, + clr_normal); +} + + +void Simulator::PrintWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + VIXL_ASSERT(GetPrintRegLaneCount(format) == 1); + + // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy + // and readable, the value is aligned with the values in the register trace. + PrintRegisterRawHelper(reg_code, + Reg31IsZeroRegister, + GetPrintRegSizeInBytes(format)); + fprintf(stream_, + " -> %s0x%016" PRIxPTR "%s\n", + clr_memory_address, + address, + clr_normal); +} + + +void Simulator::PrintVWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane) { + // The templates: + // "# v{code}: 0x{rawbits} -> {address}" + // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}". + // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}" + // Because this trace doesn't represent a change to the source register's + // value, only the relevant part of the value is printed. To keep the trace + // tidy and readable, the raw value is aligned with the other values in the + // register trace. 
+ int lane_count = GetPrintRegLaneCount(format); + int lane_size = GetPrintRegLaneSizeInBytes(format); + int reg_size = GetPrintRegSizeInBytes(format); + PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane); + if (format & kPrintRegAsFP) { + PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane); + } + fprintf(stream_, + " -> %s0x%016" PRIxPTR "%s\n", + clr_memory_address, + address, + clr_normal); +} + + +void Simulator::PrintTakenBranch(const Instruction* target) { + fprintf(stream_, + "# %sBranch%s to 0x%016" PRIx64 ".\n", + clr_branch_marker, + clr_normal, + reinterpret_cast(target)); +} + + +// Visitors--------------------------------------------------------------------- + +void Simulator::VisitUnimplemented(const Instruction* instr) { + printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n", + reinterpret_cast(instr), + instr->GetInstructionBits()); + VIXL_UNIMPLEMENTED(); +} + + +void Simulator::VisitUnallocated(const Instruction* instr) { + printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n", + reinterpret_cast(instr), + instr->GetInstructionBits()); + VIXL_UNIMPLEMENTED(); +} + + +void Simulator::VisitPCRelAddressing(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) || + (instr->Mask(PCRelAddressingMask) == ADRP)); + + WriteRegister(instr->GetRd(), instr->GetImmPCOffsetTarget()); +} + + +void Simulator::VisitUnconditionalBranch(const Instruction* instr) { + switch (instr->Mask(UnconditionalBranchMask)) { + case BL: + WriteLr(instr->GetNextInstruction()); + VIXL_FALLTHROUGH(); + case B: + WritePc(instr->GetImmPCOffsetTarget()); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitConditionalBranch(const Instruction* instr) { + VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond); + if (ConditionPassed(instr->GetConditionBranch())) { + WritePc(instr->GetImmPCOffsetTarget()); + } +} + +BType Simulator::GetBTypeFromInstruction(const Instruction* instr) const { + switch 
(instr->Mask(UnconditionalBranchToRegisterMask)) { + case BLR: + case BLRAA: + case BLRAB: + case BLRAAZ: + case BLRABZ: + return BranchAndLink; + case BR: + case BRAA: + case BRAB: + case BRAAZ: + case BRABZ: + if ((instr->GetRn() == 16) || (instr->GetRn() == 17) || + !PcIsInGuardedPage()) { + return BranchFromUnguardedOrToIP; + } + return BranchFromGuardedNotToIP; + } + return DefaultBType; +} + +void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) { + bool authenticate = false; + bool link = false; + uint64_t addr = ReadXRegister(instr->GetRn()); + uint64_t context = 0; + + switch (instr->Mask(UnconditionalBranchToRegisterMask)) { + case BLR: + link = true; + VIXL_FALLTHROUGH(); + case BR: + case RET: + break; + + case BLRAAZ: + case BLRABZ: + link = true; + VIXL_FALLTHROUGH(); + case BRAAZ: + case BRABZ: + authenticate = true; + break; + + case BLRAA: + case BLRAB: + link = true; + VIXL_FALLTHROUGH(); + case BRAA: + case BRAB: + authenticate = true; + context = ReadXRegister(instr->GetRd()); + break; + + case RETAA: + case RETAB: + authenticate = true; + addr = ReadXRegister(kLinkRegCode); + context = ReadXRegister(31, Reg31IsStackPointer); + break; + default: + VIXL_UNREACHABLE(); + } + + if (link) { + WriteLr(instr->GetNextInstruction()); + } + + if (authenticate) { + PACKey key = (instr->ExtractBit(10) == 0) ? 
kPACKeyIA : kPACKeyIB; + addr = AuthPAC(addr, context, key, kInstructionPointer); + + int error_lsb = GetTopPACBit(addr, kInstructionPointer) - 2; + if (((addr >> error_lsb) & 0x3) != 0x0) { + VIXL_ABORT_WITH_MSG("Failed to authenticate pointer."); + } + } + + WritePc(Instruction::Cast(addr)); + WriteNextBType(GetBTypeFromInstruction(instr)); +} + + +void Simulator::VisitTestBranch(const Instruction* instr) { + unsigned bit_pos = + (instr->GetImmTestBranchBit5() << 5) | instr->GetImmTestBranchBit40(); + bool bit_zero = ((ReadXRegister(instr->GetRt()) >> bit_pos) & 1) == 0; + bool take_branch = false; + switch (instr->Mask(TestBranchMask)) { + case TBZ: + take_branch = bit_zero; + break; + case TBNZ: + take_branch = !bit_zero; + break; + default: + VIXL_UNIMPLEMENTED(); + } + if (take_branch) { + WritePc(instr->GetImmPCOffsetTarget()); + } +} + + +void Simulator::VisitCompareBranch(const Instruction* instr) { + unsigned rt = instr->GetRt(); + bool take_branch = false; + switch (instr->Mask(CompareBranchMask)) { + case CBZ_w: + take_branch = (ReadWRegister(rt) == 0); + break; + case CBZ_x: + take_branch = (ReadXRegister(rt) == 0); + break; + case CBNZ_w: + take_branch = (ReadWRegister(rt) != 0); + break; + case CBNZ_x: + take_branch = (ReadXRegister(rt) != 0); + break; + default: + VIXL_UNIMPLEMENTED(); + } + if (take_branch) { + WritePc(instr->GetImmPCOffsetTarget()); + } +} + + +void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) { + unsigned reg_size = instr->GetSixtyFourBits() ? 
kXRegSize : kWRegSize; + bool set_flags = instr->GetFlagsUpdate(); + int64_t new_val = 0; + Instr operation = instr->Mask(AddSubOpMask); + + switch (operation) { + case ADD: + case ADDS: { + new_val = AddWithCarry(reg_size, + set_flags, + ReadRegister(reg_size, + instr->GetRn(), + instr->GetRnMode()), + op2); + break; + } + case SUB: + case SUBS: { + new_val = AddWithCarry(reg_size, + set_flags, + ReadRegister(reg_size, + instr->GetRn(), + instr->GetRnMode()), + ~op2, + 1); + break; + } + default: + VIXL_UNREACHABLE(); + } + + WriteRegister(reg_size, + instr->GetRd(), + new_val, + LogRegWrites, + instr->GetRdMode()); +} + + +void Simulator::VisitAddSubShifted(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op2 = ShiftOperand(reg_size, + ReadRegister(reg_size, instr->GetRm()), + static_cast(instr->GetShiftDP()), + instr->GetImmDPShift()); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubImmediate(const Instruction* instr) { + int64_t op2 = instr->GetImmAddSub() + << ((instr->GetShiftAddSub() == 1) ? 12 : 0); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubExtended(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op2 = ExtendValue(reg_size, + ReadRegister(reg_size, instr->GetRm()), + static_cast(instr->GetExtendMode()), + instr->GetImmExtendShift()); + AddSubHelper(instr, op2); +} + + +void Simulator::VisitAddSubWithCarry(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? 
kXRegSize : kWRegSize; + int64_t op2 = ReadRegister(reg_size, instr->GetRm()); + int64_t new_val; + + if ((instr->Mask(AddSubOpMask) == SUB) || + (instr->Mask(AddSubOpMask) == SUBS)) { + op2 = ~op2; + } + + new_val = AddWithCarry(reg_size, + instr->GetFlagsUpdate(), + ReadRegister(reg_size, instr->GetRn()), + op2, + ReadC()); + + WriteRegister(reg_size, instr->GetRd(), new_val); +} + + +void Simulator::VisitRotateRightIntoFlags(const Instruction* instr) { + switch (instr->Mask(RotateRightIntoFlagsMask)) { + case RMIF: { + uint64_t value = ReadRegister(instr->GetRn()); + unsigned shift = instr->GetImmRMIFRotation(); + unsigned mask = instr->GetNzcv(); + uint64_t rotated = RotateRight(value, shift, kXRegSize); + + ReadNzcv().SetFlags((rotated & mask) | (ReadNzcv().GetFlags() & ~mask)); + break; + } + } +} + + +void Simulator::VisitEvaluateIntoFlags(const Instruction* instr) { + uint32_t value = ReadRegister(instr->GetRn()); + unsigned msb = (instr->Mask(EvaluateIntoFlagsMask) == SETF16) ? 15 : 7; + + unsigned sign_bit = (value >> msb) & 1; + unsigned overflow_bit = (value >> (msb + 1)) & 1; + ReadNzcv().SetN(sign_bit); + ReadNzcv().SetZ((value << (31 - msb)) == 0); + ReadNzcv().SetV(sign_bit ^ overflow_bit); +} + + +void Simulator::VisitLogicalShifted(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + Shift shift_type = static_cast(instr->GetShiftDP()); + unsigned shift_amount = instr->GetImmDPShift(); + int64_t op2 = ShiftOperand(reg_size, + ReadRegister(reg_size, instr->GetRm()), + shift_type, + shift_amount); + if (instr->Mask(NOT) == NOT) { + op2 = ~op2; + } + LogicalHelper(instr, op2); +} + + +void Simulator::VisitLogicalImmediate(const Instruction* instr) { + LogicalHelper(instr, instr->GetImmLogical()); +} + + +void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) { + unsigned reg_size = instr->GetSixtyFourBits() ? 
kXRegSize : kWRegSize; + int64_t op1 = ReadRegister(reg_size, instr->GetRn()); + int64_t result = 0; + bool update_flags = false; + + // Switch on the logical operation, stripping out the NOT bit, as it has a + // different meaning for logical immediate instructions. + switch (instr->Mask(LogicalOpMask & ~NOT)) { + case ANDS: + update_flags = true; + VIXL_FALLTHROUGH(); + case AND: + result = op1 & op2; + break; + case ORR: + result = op1 | op2; + break; + case EOR: + result = op1 ^ op2; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + if (update_flags) { + ReadNzcv().SetN(CalcNFlag(result, reg_size)); + ReadNzcv().SetZ(CalcZFlag(result)); + ReadNzcv().SetC(0); + ReadNzcv().SetV(0); + LogSystemRegister(NZCV); + } + + WriteRegister(reg_size, + instr->GetRd(), + result, + LogRegWrites, + instr->GetRdMode()); +} + + +void Simulator::VisitConditionalCompareRegister(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + ConditionalCompareHelper(instr, ReadRegister(reg_size, instr->GetRm())); +} + + +void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) { + ConditionalCompareHelper(instr, instr->GetImmCondCmp()); +} + + +void Simulator::ConditionalCompareHelper(const Instruction* instr, + int64_t op2) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t op1 = ReadRegister(reg_size, instr->GetRn()); + + if (ConditionPassed(instr->GetCondition())) { + // If the condition passes, set the status flags to the result of comparing + // the operands. + if (instr->Mask(ConditionalCompareMask) == CCMP) { + AddWithCarry(reg_size, true, op1, ~op2, 1); + } else { + VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN); + AddWithCarry(reg_size, true, op1, op2, 0); + } + } else { + // If the condition fails, set the status flags to the nzcv immediate. 
+ ReadNzcv().SetFlags(instr->GetNzcv()); + LogSystemRegister(NZCV); + } +} + + +void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) { + int offset = instr->GetImmLSUnsigned() << instr->GetSizeLS(); + LoadStoreHelper(instr, offset, Offset); +} + + +void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) { + LoadStoreHelper(instr, instr->GetImmLS(), Offset); +} + + +void Simulator::VisitLoadStorePreIndex(const Instruction* instr) { + LoadStoreHelper(instr, instr->GetImmLS(), PreIndex); +} + + +void Simulator::VisitLoadStorePostIndex(const Instruction* instr) { + LoadStoreHelper(instr, instr->GetImmLS(), PostIndex); +} + + +template +void Simulator::LoadAcquireRCpcUnscaledOffsetHelper(const Instruction* instr) { + unsigned rt = instr->GetRt(); + unsigned rn = instr->GetRn(); + + unsigned element_size = sizeof(T2); + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + int offset = instr->GetImmLS(); + address += offset; + + // Verify that the address is available to the host. + VIXL_ASSERT(address == static_cast(address)); + + // Check the alignment of `address`. + if (AlignDown(address, 16) != AlignDown(address + element_size - 1, 16)) { + VIXL_ALIGNMENT_EXCEPTION(); + } + + WriteRegister(rt, static_cast(Memory::Read(address))); + + // Approximate load-acquire by issuing a full barrier after the load. + __sync_synchronize(); + + LogRead(address, rt, GetPrintRegisterFormat(element_size)); +} + + +template +void Simulator::StoreReleaseUnscaledOffsetHelper(const Instruction* instr) { + unsigned rt = instr->GetRt(); + unsigned rn = instr->GetRn(); + + unsigned element_size = sizeof(T); + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + int offset = instr->GetImmLS(); + address += offset; + + // Verify that the address is available to the host. + VIXL_ASSERT(address == static_cast(address)); + + // Check the alignment of `address`. 
+ if (AlignDown(address, 16) != AlignDown(address + element_size - 1, 16)) { + VIXL_ALIGNMENT_EXCEPTION(); + } + + // Approximate store-release by issuing a full barrier after the load. + __sync_synchronize(); + + Memory::Write(address, ReadRegister(rt)); + + LogWrite(address, rt, GetPrintRegisterFormat(element_size)); +} + + +void Simulator::VisitLoadStoreRCpcUnscaledOffset(const Instruction* instr) { + switch (instr->Mask(LoadStoreRCpcUnscaledOffsetMask)) { + case LDAPURB: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPURH: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPUR_w: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPUR_x: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPURSB_w: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPURSB_x: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPURSH_w: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPURSH_x: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case LDAPURSW: + LoadAcquireRCpcUnscaledOffsetHelper(instr); + break; + case STLURB: + StoreReleaseUnscaledOffsetHelper(instr); + break; + case STLURH: + StoreReleaseUnscaledOffsetHelper(instr); + break; + case STLUR_w: + StoreReleaseUnscaledOffsetHelper(instr); + break; + case STLUR_x: + StoreReleaseUnscaledOffsetHelper(instr); + break; + } +} + + +void Simulator::VisitLoadStorePAC(const Instruction* instr) { + unsigned dst = instr->GetRt(); + unsigned addr_reg = instr->GetRn(); + + uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer); + + PACKey key = (instr->ExtractBit(23) == 0) ? 
kPACKeyDA : kPACKeyDB; + address = AuthPAC(address, 0, key, kDataPointer); + + int error_lsb = GetTopPACBit(address, kInstructionPointer) - 2; + if (((address >> error_lsb) & 0x3) != 0x0) { + VIXL_ABORT_WITH_MSG("Failed to authenticate pointer."); + } + + + if ((addr_reg == 31) && ((address % 16) != 0)) { + // When the base register is SP the stack pointer is required to be + // quadword aligned prior to the address calculation and write-backs. + // Misalignment will cause a stack alignment fault. + VIXL_ALIGNMENT_EXCEPTION(); + } + + int64_t offset = instr->GetImmLSPAC(); + address += offset; + + if (instr->Mask(LoadStorePACPreBit) == LoadStorePACPreBit) { + // Pre-index mode. + VIXL_ASSERT(offset != 0); + WriteXRegister(addr_reg, address, LogRegWrites, Reg31IsStackPointer); + } + + uintptr_t addr_ptr = static_cast(address); + + // Verify that the calculated address is available to the host. + VIXL_ASSERT(address == addr_ptr); + + WriteXRegister(dst, Memory::Read(addr_ptr), NoRegLog); + unsigned access_size = 1 << 3; + LogRead(addr_ptr, dst, GetPrintRegisterFormatForSize(access_size)); +} + + +void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) { + Extend ext = static_cast(instr->GetExtendMode()); + VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); + unsigned shift_amount = instr->GetImmShiftLS() * instr->GetSizeLS(); + + int64_t offset = + ExtendValue(kXRegSize, ReadXRegister(instr->GetRm()), ext, shift_amount); + LoadStoreHelper(instr, offset, Offset); +} + + +void Simulator::LoadStoreHelper(const Instruction* instr, + int64_t offset, + AddrMode addrmode) { + unsigned srcdst = instr->GetRt(); + uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode); + + LoadStoreOp op = static_cast(instr->Mask(LoadStoreMask)); + switch (op) { + case LDRB_w: + WriteWRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDRH_w: + WriteWRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case 
LDR_w: + WriteWRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDR_x: + WriteXRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDRSB_w: + WriteWRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDRSH_w: + WriteWRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDRSB_x: + WriteXRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDRSH_x: + WriteXRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDRSW_x: + WriteXRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDR_b: + WriteBRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDR_h: + WriteHRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDR_s: + WriteSRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDR_d: + WriteDRegister(srcdst, Memory::Read(address), NoRegLog); + break; + case LDR_q: + WriteQRegister(srcdst, Memory::Read(address), NoRegLog); + break; + + case STRB_w: + Memory::Write(address, ReadWRegister(srcdst)); + break; + case STRH_w: + Memory::Write(address, ReadWRegister(srcdst)); + break; + case STR_w: + Memory::Write(address, ReadWRegister(srcdst)); + break; + case STR_x: + Memory::Write(address, ReadXRegister(srcdst)); + break; + case STR_b: + Memory::Write(address, ReadBRegister(srcdst)); + break; + case STR_h: + Memory::Write(address, ReadHRegisterBits(srcdst)); + break; + case STR_s: + Memory::Write(address, ReadSRegister(srcdst)); + break; + case STR_d: + Memory::Write(address, ReadDRegister(srcdst)); + break; + case STR_q: + Memory::Write(address, ReadQRegister(srcdst)); + break; + + // Ignore prfm hint instructions. 
+ case PRFM: + break; + + default: + VIXL_UNIMPLEMENTED(); + } + + unsigned access_size = 1 << instr->GetSizeLS(); + if (instr->IsLoad()) { + if ((op == LDR_s) || (op == LDR_d)) { + LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); + } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) { + LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } else { + LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } + } else if (instr->IsStore()) { + if ((op == STR_s) || (op == STR_d)) { + LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size)); + } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) { + LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } else { + LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size)); + } + } else { + VIXL_ASSERT(op == PRFM); + } + + local_monitor_.MaybeClear(); +} + + +void Simulator::VisitLoadStorePairOffset(const Instruction* instr) { + LoadStorePairHelper(instr, Offset); +} + + +void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) { + LoadStorePairHelper(instr, PreIndex); +} + + +void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) { + LoadStorePairHelper(instr, PostIndex); +} + + +void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) { + LoadStorePairHelper(instr, Offset); +} + + +void Simulator::LoadStorePairHelper(const Instruction* instr, + AddrMode addrmode) { + unsigned rt = instr->GetRt(); + unsigned rt2 = instr->GetRt2(); + int element_size = 1 << instr->GetSizeLSPair(); + int64_t offset = instr->GetImmLSPair() * element_size; + uintptr_t address = AddressModeHelper(instr->GetRn(), offset, addrmode); + uintptr_t address2 = address + element_size; + + LoadStorePairOp op = + static_cast(instr->Mask(LoadStorePairMask)); + + // 'rt' and 'rt2' can only be aliased for stores. 
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2)); + + switch (op) { + // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We + // will print a more detailed log. + case LDP_w: { + WriteWRegister(rt, Memory::Read(address), NoRegLog); + WriteWRegister(rt2, Memory::Read(address2), NoRegLog); + break; + } + case LDP_s: { + WriteSRegister(rt, Memory::Read(address), NoRegLog); + WriteSRegister(rt2, Memory::Read(address2), NoRegLog); + break; + } + case LDP_x: { + WriteXRegister(rt, Memory::Read(address), NoRegLog); + WriteXRegister(rt2, Memory::Read(address2), NoRegLog); + break; + } + case LDP_d: { + WriteDRegister(rt, Memory::Read(address), NoRegLog); + WriteDRegister(rt2, Memory::Read(address2), NoRegLog); + break; + } + case LDP_q: { + WriteQRegister(rt, Memory::Read(address), NoRegLog); + WriteQRegister(rt2, Memory::Read(address2), NoRegLog); + break; + } + case LDPSW_x: { + WriteXRegister(rt, Memory::Read(address), NoRegLog); + WriteXRegister(rt2, Memory::Read(address2), NoRegLog); + break; + } + case STP_w: { + Memory::Write(address, ReadWRegister(rt)); + Memory::Write(address2, ReadWRegister(rt2)); + break; + } + case STP_s: { + Memory::Write(address, ReadSRegister(rt)); + Memory::Write(address2, ReadSRegister(rt2)); + break; + } + case STP_x: { + Memory::Write(address, ReadXRegister(rt)); + Memory::Write(address2, ReadXRegister(rt2)); + break; + } + case STP_d: { + Memory::Write(address, ReadDRegister(rt)); + Memory::Write(address2, ReadDRegister(rt2)); + break; + } + case STP_q: { + Memory::Write(address, ReadQRegister(rt)); + Memory::Write(address2, ReadQRegister(rt2)); + break; + } + default: + VIXL_UNREACHABLE(); + } + + // Print a detailed trace (including the memory address) instead of the basic + // register:value trace generated by set_*reg(). 
+ if (instr->IsLoad()) { + if ((op == LDP_s) || (op == LDP_d)) { + LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(element_size)); + LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size)); + } else if (op == LDP_q) { + LogVRead(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogVRead(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } else { + LogRead(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogRead(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } + } else { + if ((op == STP_s) || (op == STP_d)) { + LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(element_size)); + LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size)); + } else if (op == STP_q) { + LogVWrite(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } else { + LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size)); + } + } + + local_monitor_.MaybeClear(); +} + + +template +void Simulator::CompareAndSwapHelper(const Instruction* instr) { + unsigned rs = instr->GetRs(); + unsigned rt = instr->GetRt(); + unsigned rn = instr->GetRn(); + + unsigned element_size = sizeof(T); + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + + CheckIsValidUnalignedAtomicAccess(rn, address, element_size); + + bool is_acquire = instr->ExtractBit(22) == 1; + bool is_release = instr->ExtractBit(15) == 1; + + T comparevalue = ReadRegister(rs); + T newvalue = ReadRegister(rt); + + // The architecture permits that the data read clears any exclusive monitors + // associated with that location, even if the compare subsequently fails. + local_monitor_.Clear(); + + T data = Memory::Read(address); + if (is_acquire) { + // Approximate load-acquire by issuing a full barrier after the load. 
+ __sync_synchronize(); + } + + if (data == comparevalue) { + if (is_release) { + // Approximate store-release by issuing a full barrier before the store. + __sync_synchronize(); + } + Memory::Write(address, newvalue); + LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size)); + } + WriteRegister(rs, data); + LogRead(address, rs, GetPrintRegisterFormatForSize(element_size)); +} + + +template +void Simulator::CompareAndSwapPairHelper(const Instruction* instr) { + VIXL_ASSERT((sizeof(T) == 4) || (sizeof(T) == 8)); + unsigned rs = instr->GetRs(); + unsigned rt = instr->GetRt(); + unsigned rn = instr->GetRn(); + + VIXL_ASSERT((rs % 2 == 0) && (rs % 2 == 0)); + + unsigned element_size = sizeof(T); + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + + CheckIsValidUnalignedAtomicAccess(rn, address, element_size * 2); + + uint64_t address2 = address + element_size; + + bool is_acquire = instr->ExtractBit(22) == 1; + bool is_release = instr->ExtractBit(15) == 1; + + T comparevalue_high = ReadRegister(rs + 1); + T comparevalue_low = ReadRegister(rs); + T newvalue_high = ReadRegister(rt + 1); + T newvalue_low = ReadRegister(rt); + + // The architecture permits that the data read clears any exclusive monitors + // associated with that location, even if the compare subsequently fails. + local_monitor_.Clear(); + + T data_high = Memory::Read(address); + T data_low = Memory::Read(address2); + + if (is_acquire) { + // Approximate load-acquire by issuing a full barrier after the load. + __sync_synchronize(); + } + + bool same = + (data_high == comparevalue_high) && (data_low == comparevalue_low); + if (same) { + if (is_release) { + // Approximate store-release by issuing a full barrier before the store. 
+ __sync_synchronize(); + } + + Memory::Write(address, newvalue_high); + Memory::Write(address2, newvalue_low); + } + + WriteRegister(rs + 1, data_high); + WriteRegister(rs, data_low); + + LogRead(address, rs + 1, GetPrintRegisterFormatForSize(element_size)); + LogRead(address2, rs, GetPrintRegisterFormatForSize(element_size)); + + if (same) { + LogWrite(address, rt + 1, GetPrintRegisterFormatForSize(element_size)); + LogWrite(address2, rt, GetPrintRegisterFormatForSize(element_size)); + } +} + + +void Simulator::PrintExclusiveAccessWarning() { + if (print_exclusive_access_warning_) { + fprintf(stderr, + "%sWARNING:%s VIXL simulator support for " + "load-/store-/clear-exclusive " + "instructions is limited. Refer to the README for details.%s\n", + clr_warning, + clr_warning_message, + clr_normal); + print_exclusive_access_warning_ = false; + } +} + + +void Simulator::VisitLoadStoreExclusive(const Instruction* instr) { + LoadStoreExclusive op = + static_cast(instr->Mask(LoadStoreExclusiveMask)); + + switch (op) { + case CAS_w: + case CASA_w: + case CASL_w: + case CASAL_w: + CompareAndSwapHelper(instr); + break; + case CAS_x: + case CASA_x: + case CASL_x: + case CASAL_x: + CompareAndSwapHelper(instr); + break; + case CASB: + case CASAB: + case CASLB: + case CASALB: + CompareAndSwapHelper(instr); + break; + case CASH: + case CASAH: + case CASLH: + case CASALH: + CompareAndSwapHelper(instr); + break; + case CASP_w: + case CASPA_w: + case CASPL_w: + case CASPAL_w: + CompareAndSwapPairHelper(instr); + break; + case CASP_x: + case CASPA_x: + case CASPL_x: + case CASPAL_x: + CompareAndSwapPairHelper(instr); + break; + default: + PrintExclusiveAccessWarning(); + + unsigned rs = instr->GetRs(); + unsigned rt = instr->GetRt(); + unsigned rt2 = instr->GetRt2(); + unsigned rn = instr->GetRn(); + + bool is_exclusive = !instr->GetLdStXNotExclusive(); + bool is_acquire_release = + !is_exclusive || instr->GetLdStXAcquireRelease(); + bool is_load = instr->GetLdStXLoad(); + bool 
is_pair = instr->GetLdStXPair(); + + unsigned element_size = 1 << instr->GetLdStXSizeLog2(); + unsigned access_size = is_pair ? element_size * 2 : element_size; + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + + CheckIsValidUnalignedAtomicAccess(rn, address, access_size); + + if (is_load) { + if (is_exclusive) { + local_monitor_.MarkExclusive(address, access_size); + } else { + // Any non-exclusive load can clear the local monitor as a side + // effect. We don't need to do this, but it is useful to stress the + // simulated code. + local_monitor_.Clear(); + } + + // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). + // We will print a more detailed log. + switch (op) { + case LDXRB_w: + case LDAXRB_w: + case LDARB_w: + case LDLARB: + WriteWRegister(rt, Memory::Read(address), NoRegLog); + break; + case LDXRH_w: + case LDAXRH_w: + case LDARH_w: + case LDLARH: + WriteWRegister(rt, Memory::Read(address), NoRegLog); + break; + case LDXR_w: + case LDAXR_w: + case LDAR_w: + case LDLAR_w: + WriteWRegister(rt, Memory::Read(address), NoRegLog); + break; + case LDXR_x: + case LDAXR_x: + case LDAR_x: + case LDLAR_x: + WriteXRegister(rt, Memory::Read(address), NoRegLog); + break; + case LDXP_w: + case LDAXP_w: + WriteWRegister(rt, Memory::Read(address), NoRegLog); + WriteWRegister(rt2, + Memory::Read(address + element_size), + NoRegLog); + break; + case LDXP_x: + case LDAXP_x: + WriteXRegister(rt, Memory::Read(address), NoRegLog); + WriteXRegister(rt2, + Memory::Read(address + element_size), + NoRegLog); + break; + default: + VIXL_UNREACHABLE(); + } + + if (is_acquire_release) { + // Approximate load-acquire by issuing a full barrier after the load. 
+ __sync_synchronize(); + } + + LogRead(address, rt, GetPrintRegisterFormatForSize(element_size)); + if (is_pair) { + LogRead(address + element_size, + rt2, + GetPrintRegisterFormatForSize(element_size)); + } + } else { + if (is_acquire_release) { + // Approximate store-release by issuing a full barrier before the + // store. + __sync_synchronize(); + } + + bool do_store = true; + if (is_exclusive) { + do_store = local_monitor_.IsExclusive(address, access_size) && + global_monitor_.IsExclusive(address, access_size); + WriteWRegister(rs, do_store ? 0 : 1); + + // - All exclusive stores explicitly clear the local monitor. + local_monitor_.Clear(); + } else { + // - Any other store can clear the local monitor as a side effect. + local_monitor_.MaybeClear(); + } + + if (do_store) { + switch (op) { + case STXRB_w: + case STLXRB_w: + case STLRB_w: + case STLLRB: + Memory::Write(address, ReadWRegister(rt)); + break; + case STXRH_w: + case STLXRH_w: + case STLRH_w: + case STLLRH: + Memory::Write(address, ReadWRegister(rt)); + break; + case STXR_w: + case STLXR_w: + case STLR_w: + case STLLR_w: + Memory::Write(address, ReadWRegister(rt)); + break; + case STXR_x: + case STLXR_x: + case STLR_x: + case STLLR_x: + Memory::Write(address, ReadXRegister(rt)); + break; + case STXP_w: + case STLXP_w: + Memory::Write(address, ReadWRegister(rt)); + Memory::Write(address + element_size, + ReadWRegister(rt2)); + break; + case STXP_x: + case STLXP_x: + Memory::Write(address, ReadXRegister(rt)); + Memory::Write(address + element_size, + ReadXRegister(rt2)); + break; + default: + VIXL_UNREACHABLE(); + } + + LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size)); + if (is_pair) { + LogWrite(address + element_size, + rt2, + GetPrintRegisterFormatForSize(element_size)); + } + } + } + } +} + +template +void Simulator::AtomicMemorySimpleHelper(const Instruction* instr) { + unsigned rs = instr->GetRs(); + unsigned rt = instr->GetRt(); + unsigned rn = instr->GetRn(); + + bool 
is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode); + bool is_release = instr->ExtractBit(22) == 1; + + unsigned element_size = sizeof(T); + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + + CheckIsValidUnalignedAtomicAccess(rn, address, element_size); + + T value = ReadRegister(rs); + + T data = Memory::Read(address); + + if (is_acquire) { + // Approximate load-acquire by issuing a full barrier after the load. + __sync_synchronize(); + } + + T result = 0; + switch (instr->Mask(AtomicMemorySimpleOpMask)) { + case LDADDOp: + result = data + value; + break; + case LDCLROp: + VIXL_ASSERT(!std::numeric_limits::is_signed); + result = data & ~value; + break; + case LDEOROp: + VIXL_ASSERT(!std::numeric_limits::is_signed); + result = data ^ value; + break; + case LDSETOp: + VIXL_ASSERT(!std::numeric_limits::is_signed); + result = data | value; + break; + + // Signed/Unsigned difference is done via the templated type T. + case LDSMAXOp: + case LDUMAXOp: + result = (data > value) ? data : value; + break; + case LDSMINOp: + case LDUMINOp: + result = (data > value) ? value : data; + break; + } + + if (is_release) { + // Approximate store-release by issuing a full barrier before the store. 
+ __sync_synchronize(); + } + + Memory::Write(address, result); + WriteRegister(rt, data, NoRegLog); + + LogRead(address, rt, GetPrintRegisterFormatForSize(element_size)); + LogWrite(address, rs, GetPrintRegisterFormatForSize(element_size)); +} + +template +void Simulator::AtomicMemorySwapHelper(const Instruction* instr) { + unsigned rs = instr->GetRs(); + unsigned rt = instr->GetRt(); + unsigned rn = instr->GetRn(); + + bool is_acquire = (instr->ExtractBit(23) == 1) && (rt != kZeroRegCode); + bool is_release = instr->ExtractBit(22) == 1; + + unsigned element_size = sizeof(T); + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + + CheckIsValidUnalignedAtomicAccess(rn, address, element_size); + + T data = Memory::Read(address); + if (is_acquire) { + // Approximate load-acquire by issuing a full barrier after the load. + __sync_synchronize(); + } + + if (is_release) { + // Approximate store-release by issuing a full barrier before the store. + __sync_synchronize(); + } + Memory::Write(address, ReadRegister(rs)); + + WriteRegister(rt, data); + + LogRead(address, rt, GetPrintRegisterFormat(element_size)); + LogWrite(address, rs, GetPrintRegisterFormat(element_size)); +} + +template +void Simulator::LoadAcquireRCpcHelper(const Instruction* instr) { + unsigned rt = instr->GetRt(); + unsigned rn = instr->GetRn(); + + unsigned element_size = sizeof(T); + uint64_t address = ReadRegister(rn, Reg31IsStackPointer); + + CheckIsValidUnalignedAtomicAccess(rn, address, element_size); + + WriteRegister(rt, Memory::Read(address)); + + // Approximate load-acquire by issuing a full barrier after the load. 
+ __sync_synchronize(); + + LogRead(address, rt, GetPrintRegisterFormat(element_size)); +} + +#define ATOMIC_MEMORY_SIMPLE_UINT_LIST(V) \ + V(LDADD) \ + V(LDCLR) \ + V(LDEOR) \ + V(LDSET) \ + V(LDUMAX) \ + V(LDUMIN) + +#define ATOMIC_MEMORY_SIMPLE_INT_LIST(V) \ + V(LDSMAX) \ + V(LDSMIN) + +void Simulator::VisitAtomicMemory(const Instruction* instr) { + switch (instr->Mask(AtomicMemoryMask)) { +// clang-format off +#define SIM_FUNC_B(A) \ + case A##B: \ + case A##AB: \ + case A##LB: \ + case A##ALB: +#define SIM_FUNC_H(A) \ + case A##H: \ + case A##AH: \ + case A##LH: \ + case A##ALH: +#define SIM_FUNC_w(A) \ + case A##_w: \ + case A##A_w: \ + case A##L_w: \ + case A##AL_w: +#define SIM_FUNC_x(A) \ + case A##_x: \ + case A##A_x: \ + case A##L_x: \ + case A##AL_x: + + ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_B) + AtomicMemorySimpleHelper(instr); + break; + ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_B) + AtomicMemorySimpleHelper(instr); + break; + ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_H) + AtomicMemorySimpleHelper(instr); + break; + ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_H) + AtomicMemorySimpleHelper(instr); + break; + ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_w) + AtomicMemorySimpleHelper(instr); + break; + ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_w) + AtomicMemorySimpleHelper(instr); + break; + ATOMIC_MEMORY_SIMPLE_UINT_LIST(SIM_FUNC_x) + AtomicMemorySimpleHelper(instr); + break; + ATOMIC_MEMORY_SIMPLE_INT_LIST(SIM_FUNC_x) + AtomicMemorySimpleHelper(instr); + break; + // clang-format on + + case SWPB: + case SWPAB: + case SWPLB: + case SWPALB: + AtomicMemorySwapHelper(instr); + break; + case SWPH: + case SWPAH: + case SWPLH: + case SWPALH: + AtomicMemorySwapHelper(instr); + break; + case SWP_w: + case SWPA_w: + case SWPL_w: + case SWPAL_w: + AtomicMemorySwapHelper(instr); + break; + case SWP_x: + case SWPA_x: + case SWPL_x: + case SWPAL_x: + AtomicMemorySwapHelper(instr); + break; + case LDAPRB: + LoadAcquireRCpcHelper(instr); + break; + case LDAPRH: + 
LoadAcquireRCpcHelper(instr); + break; + case LDAPR_w: + LoadAcquireRCpcHelper(instr); + break; + case LDAPR_x: + LoadAcquireRCpcHelper(instr); + break; + } +} + + +void Simulator::VisitLoadLiteral(const Instruction* instr) { + unsigned rt = instr->GetRt(); + uint64_t address = instr->GetLiteralAddress(); + + // Verify that the calculated address is available to the host. + VIXL_ASSERT(address == static_cast(address)); + + switch (instr->Mask(LoadLiteralMask)) { + // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then + // print a more detailed log. + case LDR_w_lit: + WriteWRegister(rt, Memory::Read(address), NoRegLog); + LogRead(address, rt, kPrintWReg); + break; + case LDR_x_lit: + WriteXRegister(rt, Memory::Read(address), NoRegLog); + LogRead(address, rt, kPrintXReg); + break; + case LDR_s_lit: + WriteSRegister(rt, Memory::Read(address), NoRegLog); + LogVRead(address, rt, kPrintSReg); + break; + case LDR_d_lit: + WriteDRegister(rt, Memory::Read(address), NoRegLog); + LogVRead(address, rt, kPrintDReg); + break; + case LDR_q_lit: + WriteQRegister(rt, Memory::Read(address), NoRegLog); + LogVRead(address, rt, kPrintReg1Q); + break; + case LDRSW_x_lit: + WriteXRegister(rt, Memory::Read(address), NoRegLog); + LogRead(address, rt, kPrintWReg); + break; + + // Ignore prfm hint instructions. + case PRFM_lit: + break; + + default: + VIXL_UNREACHABLE(); + } + + local_monitor_.MaybeClear(); +} + + +uintptr_t Simulator::AddressModeHelper(unsigned addr_reg, + int64_t offset, + AddrMode addrmode) { + uint64_t address = ReadXRegister(addr_reg, Reg31IsStackPointer); + + if ((addr_reg == 31) && ((address % 16) != 0)) { + // When the base register is SP the stack pointer is required to be + // quadword aligned prior to the address calculation and write-backs. + // Misalignment will cause a stack alignment fault. 
+ VIXL_ALIGNMENT_EXCEPTION(); + } + + if ((addrmode == PreIndex) || (addrmode == PostIndex)) { + VIXL_ASSERT(offset != 0); + // Only preindex should log the register update here. For Postindex, the + // update will be printed automatically by LogWrittenRegisters _after_ the + // memory access itself is logged. + RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog; + WriteXRegister(addr_reg, address + offset, log_mode, Reg31IsStackPointer); + } + + if ((addrmode == Offset) || (addrmode == PreIndex)) { + address += offset; + } + + // Verify that the calculated address is available to the host. + VIXL_ASSERT(address == static_cast(address)); + + return static_cast(address); +} + + +void Simulator::VisitMoveWideImmediate(const Instruction* instr) { + MoveWideImmediateOp mov_op = + static_cast(instr->Mask(MoveWideImmediateMask)); + int64_t new_xn_val = 0; + + bool is_64_bits = instr->GetSixtyFourBits() == 1; + // Shift is limited for W operations. + VIXL_ASSERT(is_64_bits || (instr->GetShiftMoveWide() < 2)); + + // Get the shifted immediate. + int64_t shift = instr->GetShiftMoveWide() * 16; + int64_t shifted_imm16 = static_cast(instr->GetImmMoveWide()) + << shift; + + // Compute the new value. + switch (mov_op) { + case MOVN_w: + case MOVN_x: { + new_xn_val = ~shifted_imm16; + if (!is_64_bits) new_xn_val &= kWRegMask; + break; + } + case MOVK_w: + case MOVK_x: { + unsigned reg_code = instr->GetRd(); + int64_t prev_xn_val = + is_64_bits ? ReadXRegister(reg_code) : ReadWRegister(reg_code); + new_xn_val = (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16; + break; + } + case MOVZ_w: + case MOVZ_x: { + new_xn_val = shifted_imm16; + break; + } + default: + VIXL_UNREACHABLE(); + } + + // Update the destination register. 
+ WriteXRegister(instr->GetRd(), new_xn_val); +} + + +void Simulator::VisitConditionalSelect(const Instruction* instr) { + uint64_t new_val = ReadXRegister(instr->GetRn()); + + if (ConditionFailed(static_cast(instr->GetCondition()))) { + new_val = ReadXRegister(instr->GetRm()); + switch (instr->Mask(ConditionalSelectMask)) { + case CSEL_w: + case CSEL_x: + break; + case CSINC_w: + case CSINC_x: + new_val++; + break; + case CSINV_w: + case CSINV_x: + new_val = ~new_val; + break; + case CSNEG_w: + case CSNEG_x: + new_val = -new_val; + break; + default: + VIXL_UNIMPLEMENTED(); + } + } + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + WriteRegister(reg_size, instr->GetRd(), new_val); +} + + +// clang-format off +#define PAUTH_MODES(V) \ + V(IA, ReadXRegister(src), kPACKeyIA, kInstructionPointer) \ + V(IB, ReadXRegister(src), kPACKeyIB, kInstructionPointer) \ + V(IZA, 0x00000000, kPACKeyIA, kInstructionPointer) \ + V(IZB, 0x00000000, kPACKeyIB, kInstructionPointer) \ + V(DA, ReadXRegister(src), kPACKeyDA, kDataPointer) \ + V(DB, ReadXRegister(src), kPACKeyDB, kDataPointer) \ + V(DZA, 0x00000000, kPACKeyDA, kDataPointer) \ + V(DZB, 0x00000000, kPACKeyDB, kDataPointer) +// clang-format on + +void Simulator::VisitDataProcessing1Source(const Instruction* instr) { + unsigned dst = instr->GetRd(); + unsigned src = instr->GetRn(); + + switch (instr->Mask(DataProcessing1SourceMask)) { +#define DEFINE_PAUTH_FUNCS(SUFFIX, MOD, KEY, D) \ + case PAC##SUFFIX: { \ + uint64_t ptr = ReadXRegister(dst); \ + WriteXRegister(dst, AddPAC(ptr, MOD, KEY, D)); \ + break; \ + } \ + case AUT##SUFFIX: { \ + uint64_t ptr = ReadXRegister(dst); \ + WriteXRegister(dst, AuthPAC(ptr, MOD, KEY, D)); \ + break; \ + } + + PAUTH_MODES(DEFINE_PAUTH_FUNCS) +#undef DEFINE_PAUTH_FUNCS + + case XPACI: + WriteXRegister(dst, StripPAC(ReadXRegister(dst), kInstructionPointer)); + break; + case XPACD: + WriteXRegister(dst, StripPAC(ReadXRegister(dst), kDataPointer)); + break; + case RBIT_w: 
+ WriteWRegister(dst, ReverseBits(ReadWRegister(src))); + break; + case RBIT_x: + WriteXRegister(dst, ReverseBits(ReadXRegister(src))); + break; + case REV16_w: + WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 1)); + break; + case REV16_x: + WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 1)); + break; + case REV_w: + WriteWRegister(dst, ReverseBytes(ReadWRegister(src), 2)); + break; + case REV32_x: + WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 2)); + break; + case REV_x: + WriteXRegister(dst, ReverseBytes(ReadXRegister(src), 3)); + break; + case CLZ_w: + WriteWRegister(dst, CountLeadingZeros(ReadWRegister(src))); + break; + case CLZ_x: + WriteXRegister(dst, CountLeadingZeros(ReadXRegister(src))); + break; + case CLS_w: + WriteWRegister(dst, CountLeadingSignBits(ReadWRegister(src))); + break; + case CLS_x: + WriteXRegister(dst, CountLeadingSignBits(ReadXRegister(src))); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) { + VIXL_ASSERT((n > 32) && (n <= 64)); + for (unsigned i = (n - 1); i >= 32; i--) { + if (((data >> i) & 1) != 0) { + uint64_t polysh32 = (uint64_t)poly << (i - 32); + uint64_t mask = (UINT64_C(1) << i) - 1; + data = ((data & mask) ^ polysh32); + } + } + return data & 0xffffffff; +} + + +template +uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) { + unsigned size = sizeof(val) * 8; // Number of bits in type T. + VIXL_ASSERT((size == 8) || (size == 16) || (size == 32)); + uint64_t tempacc = static_cast(ReverseBits(acc)) << size; + uint64_t tempval = static_cast(ReverseBits(val)) << 32; + return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly)); +} + + +uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) { + // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute + // the CRC of each 32-bit word sequentially. 
+ acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly); + return Crc32Checksum(acc, (uint32_t)(val >> 32), poly); +} + + +void Simulator::VisitDataProcessing2Source(const Instruction* instr) { + Shift shift_op = NO_SHIFT; + int64_t result = 0; + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + + switch (instr->Mask(DataProcessing2SourceMask)) { + case SDIV_w: { + int32_t rn = ReadWRegister(instr->GetRn()); + int32_t rm = ReadWRegister(instr->GetRm()); + if ((rn == kWMinInt) && (rm == -1)) { + result = kWMinInt; + } else if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case SDIV_x: { + int64_t rn = ReadXRegister(instr->GetRn()); + int64_t rm = ReadXRegister(instr->GetRm()); + if ((rn == kXMinInt) && (rm == -1)) { + result = kXMinInt; + } else if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case UDIV_w: { + uint32_t rn = static_cast(ReadWRegister(instr->GetRn())); + uint32_t rm = static_cast(ReadWRegister(instr->GetRm())); + if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. + result = 0; + } else { + result = rn / rm; + } + break; + } + case UDIV_x: { + uint64_t rn = static_cast(ReadXRegister(instr->GetRn())); + uint64_t rm = static_cast(ReadXRegister(instr->GetRm())); + if (rm == 0) { + // Division by zero can be trapped, but not on A-class processors. 
+ result = 0; + } else { + result = rn / rm; + } + break; + } + case LSLV_w: + case LSLV_x: + shift_op = LSL; + break; + case LSRV_w: + case LSRV_x: + shift_op = LSR; + break; + case ASRV_w: + case ASRV_x: + shift_op = ASR; + break; + case RORV_w: + case RORV_x: + shift_op = ROR; + break; + case PACGA: { + uint64_t dst = static_cast(ReadXRegister(instr->GetRn())); + uint64_t src = static_cast( + ReadXRegister(instr->GetRm(), Reg31IsStackPointer)); + uint64_t code = ComputePAC(dst, src, kPACKeyGA); + result = code & 0xffffffff00000000; + break; + } + case CRC32B: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint8_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32H: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint16_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32W: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint32_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + break; + } + case CRC32X: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint64_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32_POLY); + reg_size = kWRegSize; + break; + } + case CRC32CB: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint8_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CH: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint16_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CW: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint32_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + break; + } + case CRC32CX: { + uint32_t acc = ReadRegister(instr->GetRn()); + uint64_t val = ReadRegister(instr->GetRm()); + result = Crc32Checksum(acc, val, CRC32C_POLY); + reg_size = kWRegSize; + break; + } + default: + 
VIXL_UNIMPLEMENTED(); + } + + if (shift_op != NO_SHIFT) { + // Shift distance encoded in the least-significant five/six bits of the + // register. + int mask = (instr->GetSixtyFourBits() == 1) ? 0x3f : 0x1f; + unsigned shift = ReadWRegister(instr->GetRm()) & mask; + result = ShiftOperand(reg_size, + ReadRegister(reg_size, instr->GetRn()), + shift_op, + shift); + } + WriteRegister(reg_size, instr->GetRd(), result); +} + + +// The algorithm used is adapted from the one described in section 8.2 of +// Hacker's Delight, by Henry S. Warren, Jr. +template +static int64_t MultiplyHigh(T u, T v) { + uint64_t u0, v0, w0, u1, v1, w1, w2, t; + uint64_t sign_mask = UINT64_C(0x8000000000000000); + uint64_t sign_ext = 0; + if (std::numeric_limits::is_signed) { + sign_ext = UINT64_C(0xffffffff00000000); + } + + VIXL_ASSERT(sizeof(u) == sizeof(uint64_t)); + VIXL_ASSERT(sizeof(u) == sizeof(u0)); + + u0 = u & 0xffffffff; + u1 = u >> 32 | (((u & sign_mask) != 0) ? sign_ext : 0); + v0 = v & 0xffffffff; + v1 = v >> 32 | (((v & sign_mask) != 0) ? sign_ext : 0); + + w0 = u0 * v0; + t = u1 * v0 + (w0 >> 32); + + w1 = t & 0xffffffff; + w2 = t >> 32 | (((t & sign_mask) != 0) ? sign_ext : 0); + w1 = u0 * v1 + w1; + w1 = w1 >> 32 | (((w1 & sign_mask) != 0) ? sign_ext : 0); + + uint64_t value = u1 * v1 + w2 + w1; + int64_t result; + memcpy(&result, &value, sizeof(result)); + return result; +} + + +void Simulator::VisitDataProcessing3Source(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + + uint64_t result = 0; + // Extract and sign- or zero-extend 32-bit arguments for widening operations. 
+ uint64_t rn_u32 = ReadRegister(instr->GetRn()); + uint64_t rm_u32 = ReadRegister(instr->GetRm()); + int64_t rn_s32 = ReadRegister(instr->GetRn()); + int64_t rm_s32 = ReadRegister(instr->GetRm()); + uint64_t rn_u64 = ReadXRegister(instr->GetRn()); + uint64_t rm_u64 = ReadXRegister(instr->GetRm()); + switch (instr->Mask(DataProcessing3SourceMask)) { + case MADD_w: + case MADD_x: + result = ReadXRegister(instr->GetRa()) + (rn_u64 * rm_u64); + break; + case MSUB_w: + case MSUB_x: + result = ReadXRegister(instr->GetRa()) - (rn_u64 * rm_u64); + break; + case SMADDL_x: + result = ReadXRegister(instr->GetRa()) + + static_cast(rn_s32 * rm_s32); + break; + case SMSUBL_x: + result = ReadXRegister(instr->GetRa()) - + static_cast(rn_s32 * rm_s32); + break; + case UMADDL_x: + result = ReadXRegister(instr->GetRa()) + (rn_u32 * rm_u32); + break; + case UMSUBL_x: + result = ReadXRegister(instr->GetRa()) - (rn_u32 * rm_u32); + break; + case UMULH_x: + result = MultiplyHigh(ReadRegister(instr->GetRn()), + ReadRegister(instr->GetRm())); + break; + case SMULH_x: + result = MultiplyHigh(ReadXRegister(instr->GetRn()), + ReadXRegister(instr->GetRm())); + break; + default: + VIXL_UNIMPLEMENTED(); + } + WriteRegister(reg_size, instr->GetRd(), result); +} + + +void Simulator::VisitBitfield(const Instruction* instr) { + unsigned reg_size = instr->GetSixtyFourBits() ? kXRegSize : kWRegSize; + int64_t reg_mask = instr->GetSixtyFourBits() ? kXRegMask : kWRegMask; + int R = instr->GetImmR(); + int S = instr->GetImmS(); + int diff = S - R; + uint64_t mask; + if (diff >= 0) { + mask = ~UINT64_C(0) >> (64 - (diff + 1)); + mask = (static_cast(diff) < (reg_size - 1)) ? mask : reg_mask; + } else { + mask = ~UINT64_C(0) >> (64 - (S + 1)); + mask = RotateRight(mask, R, reg_size); + diff += reg_size; + } + + // inzero indicates if the extracted bitfield is inserted into the + // destination register value or in zero. + // If extend is true, extend the sign of the extracted bitfield. 
+ bool inzero = false; + bool extend = false; + switch (instr->Mask(BitfieldMask)) { + case BFM_x: + case BFM_w: + break; + case SBFM_x: + case SBFM_w: + inzero = true; + extend = true; + break; + case UBFM_x: + case UBFM_w: + inzero = true; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + uint64_t dst = inzero ? 0 : ReadRegister(reg_size, instr->GetRd()); + uint64_t src = ReadRegister(reg_size, instr->GetRn()); + // Rotate source bitfield into place. + uint64_t result = RotateRight(src, R, reg_size); + // Determine the sign extension. + uint64_t topbits = (diff == 63) ? 0 : (~UINT64_C(0) << (diff + 1)); + uint64_t signbits = extend && ((src >> S) & 1) ? topbits : 0; + + // Merge sign extension, dest/zero and bitfield. + result = signbits | (result & mask) | (dst & ~mask); + + WriteRegister(reg_size, instr->GetRd(), result); +} + + +void Simulator::VisitExtract(const Instruction* instr) { + unsigned lsb = instr->GetImmS(); + unsigned reg_size = (instr->GetSixtyFourBits() == 1) ? kXRegSize : kWRegSize; + uint64_t low_res = + static_cast(ReadRegister(reg_size, instr->GetRm())) >> lsb; + uint64_t high_res = + (lsb == 0) ? 
0 : ReadRegister(reg_size, instr->GetRn()) + << (reg_size - lsb); + WriteRegister(reg_size, instr->GetRd(), low_res | high_res); +} + + +void Simulator::VisitFPImmediate(const Instruction* instr) { + AssertSupportedFPCR(); + unsigned dest = instr->GetRd(); + switch (instr->Mask(FPImmediateMask)) { + case FMOV_h_imm: + WriteHRegister(dest, Float16ToRawbits(instr->GetImmFP16())); + break; + case FMOV_s_imm: + WriteSRegister(dest, instr->GetImmFP32()); + break; + case FMOV_d_imm: + WriteDRegister(dest, instr->GetImmFP64()); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPIntegerConvert(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned dst = instr->GetRd(); + unsigned src = instr->GetRn(); + + FPRounding round = ReadRMode(); + + switch (instr->Mask(FPIntegerConvertMask)) { + case FCVTAS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieAway)); + break; + case FCVTAS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieAway)); + break; + case FCVTAS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieAway)); + break; + case FCVTAS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieAway)); + break; + case FCVTAS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieAway)); + break; + case FCVTAS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieAway)); + break; + case FCVTAU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieAway)); + break; + case FCVTAU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieAway)); + break; + case FCVTAU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieAway)); + break; + case FCVTAU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieAway)); + break; + case FCVTAU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieAway)); + break; + case FCVTAU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieAway)); + break; + case FCVTMS_wh: + WriteWRegister(dst, 
FPToInt32(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTMS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTMU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPNegativeInfinity)); + break; + case FCVTPS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTPS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPPositiveInfinity)); + break; + case 
FCVTPU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTPU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPPositiveInfinity)); + break; + case FCVTNS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPTieEven)); + break; + case FCVTNS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPTieEven)); + break; + case FCVTNS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPTieEven)); + break; + case FCVTNS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPTieEven)); + break; + case FCVTNS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPTieEven)); + break; + case FCVTNS_xd: + WriteXRegister(dst, FPToInt64(ReadDRegister(src), FPTieEven)); + break; + case FCVTNU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPTieEven)); + break; + case FCVTNU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPTieEven)); + break; + case FCVTNU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPTieEven)); + break; + case FCVTNU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPTieEven)); + break; + case FCVTNU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPTieEven)); + break; + case FCVTNU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPTieEven)); + break; + case FCVTZS_wh: + WriteWRegister(dst, FPToInt32(ReadHRegister(src), FPZero)); + break; + case FCVTZS_xh: + WriteXRegister(dst, FPToInt64(ReadHRegister(src), FPZero)); + break; + case FCVTZS_ws: + WriteWRegister(dst, FPToInt32(ReadSRegister(src), FPZero)); + break; + case FCVTZS_xs: + WriteXRegister(dst, FPToInt64(ReadSRegister(src), FPZero)); + break; + case FCVTZS_wd: + WriteWRegister(dst, FPToInt32(ReadDRegister(src), FPZero)); + break; + case FCVTZS_xd: + WriteXRegister(dst, 
FPToInt64(ReadDRegister(src), FPZero)); + break; + case FCVTZU_wh: + WriteWRegister(dst, FPToUInt32(ReadHRegister(src), FPZero)); + break; + case FCVTZU_xh: + WriteXRegister(dst, FPToUInt64(ReadHRegister(src), FPZero)); + break; + case FCVTZU_ws: + WriteWRegister(dst, FPToUInt32(ReadSRegister(src), FPZero)); + break; + case FCVTZU_xs: + WriteXRegister(dst, FPToUInt64(ReadSRegister(src), FPZero)); + break; + case FCVTZU_wd: + WriteWRegister(dst, FPToUInt32(ReadDRegister(src), FPZero)); + break; + case FCVTZU_xd: + WriteXRegister(dst, FPToUInt64(ReadDRegister(src), FPZero)); + break; + case FJCVTZS: + WriteWRegister(dst, FPToFixedJS(ReadDRegister(src))); + break; + case FMOV_hw: + WriteHRegister(dst, ReadWRegister(src) & kHRegMask); + break; + case FMOV_wh: + WriteWRegister(dst, ReadHRegisterBits(src)); + break; + case FMOV_xh: + WriteXRegister(dst, ReadHRegisterBits(src)); + break; + case FMOV_hx: + WriteHRegister(dst, ReadXRegister(src) & kHRegMask); + break; + case FMOV_ws: + WriteWRegister(dst, ReadSRegisterBits(src)); + break; + case FMOV_xd: + WriteXRegister(dst, ReadDRegisterBits(src)); + break; + case FMOV_sw: + WriteSRegisterBits(dst, ReadWRegister(src)); + break; + case FMOV_dx: + WriteDRegisterBits(dst, ReadXRegister(src)); + break; + case FMOV_d1_x: + LogicVRegister(ReadVRegister(dst)) + .SetUint(kFormatD, 1, ReadXRegister(src)); + break; + case FMOV_x_d1: + WriteXRegister(dst, LogicVRegister(ReadVRegister(src)).Uint(kFormatD, 1)); + break; + + // A 32-bit input can be handled in the same way as a 64-bit input, since + // the sign- or zero-extension will not affect the conversion. 
+ case SCVTF_dx: + WriteDRegister(dst, FixedToDouble(ReadXRegister(src), 0, round)); + break; + case SCVTF_dw: + WriteDRegister(dst, FixedToDouble(ReadWRegister(src), 0, round)); + break; + case UCVTF_dx: + WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), 0, round)); + break; + case UCVTF_dw: { + WriteDRegister(dst, + UFixedToDouble(ReadRegister<uint32_t>(src), 0, round)); + break; + } + case SCVTF_sx: + WriteSRegister(dst, FixedToFloat(ReadXRegister(src), 0, round)); + break; + case SCVTF_sw: + WriteSRegister(dst, FixedToFloat(ReadWRegister(src), 0, round)); + break; + case UCVTF_sx: + WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), 0, round)); + break; + case UCVTF_sw: { + WriteSRegister(dst, UFixedToFloat(ReadRegister<uint32_t>(src), 0, round)); + break; + } + case SCVTF_hx: + WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), 0, round)); + break; + case SCVTF_hw: + WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), 0, round)); + break; + case UCVTF_hx: + WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), 0, round)); + break; + case UCVTF_hw: { + WriteHRegister(dst, + UFixedToFloat16(ReadRegister<uint32_t>(src), 0, round)); + break; + } + + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPFixedPointConvert(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned dst = instr->GetRd(); + unsigned src = instr->GetRn(); + int fbits = 64 - instr->GetFPScale(); + + FPRounding round = ReadRMode(); + + switch (instr->Mask(FPFixedPointConvertMask)) { + // A 32-bit input can be handled in the same way as a 64-bit input, since + // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed: + WriteDRegister(dst, FixedToDouble(ReadXRegister(src), fbits, round)); + break; + case SCVTF_dw_fixed: + WriteDRegister(dst, FixedToDouble(ReadWRegister(src), fbits, round)); + break; + case UCVTF_dx_fixed: + WriteDRegister(dst, UFixedToDouble(ReadXRegister(src), fbits, round)); + break; + case UCVTF_dw_fixed: { + WriteDRegister(dst, + UFixedToDouble(ReadRegister<uint32_t>(src), fbits, round)); + break; + } + case SCVTF_sx_fixed: + WriteSRegister(dst, FixedToFloat(ReadXRegister(src), fbits, round)); + break; + case SCVTF_sw_fixed: + WriteSRegister(dst, FixedToFloat(ReadWRegister(src), fbits, round)); + break; + case UCVTF_sx_fixed: + WriteSRegister(dst, UFixedToFloat(ReadXRegister(src), fbits, round)); + break; + case UCVTF_sw_fixed: { + WriteSRegister(dst, + UFixedToFloat(ReadRegister<uint32_t>(src), fbits, round)); + break; + } + case SCVTF_hx_fixed: + WriteHRegister(dst, FixedToFloat16(ReadXRegister(src), fbits, round)); + break; + case SCVTF_hw_fixed: + WriteHRegister(dst, FixedToFloat16(ReadWRegister(src), fbits, round)); + break; + case UCVTF_hx_fixed: + WriteHRegister(dst, UFixedToFloat16(ReadXRegister(src), fbits, round)); + break; + case UCVTF_hw_fixed: { + WriteHRegister(dst, + UFixedToFloat16(ReadRegister<uint32_t>(src), + fbits, + round)); + break; + } + case FCVTZS_xd_fixed: + WriteXRegister(dst, + FPToInt64(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZS_wd_fixed: + WriteWRegister(dst, + FPToInt32(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZU_xd_fixed: + WriteXRegister(dst, + FPToUInt64(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZU_wd_fixed: + WriteWRegister(dst, + FPToUInt32(ReadDRegister(src) * std::pow(2.0, fbits), + FPZero)); + break; + case FCVTZS_xs_fixed: + WriteXRegister(dst, + FPToInt64(ReadSRegister(src) * std::pow(2.0f, fbits), + FPZero)); + break; + case FCVTZS_ws_fixed: + WriteWRegister(dst, + FPToInt32(ReadSRegister(src) * std::pow(2.0f, fbits), + 
FPZero)); + break; + case FCVTZU_xs_fixed: + WriteXRegister(dst, + FPToUInt64(ReadSRegister(src) * std::pow(2.0f, fbits), + FPZero)); + break; + case FCVTZU_ws_fixed: + WriteWRegister(dst, + FPToUInt32(ReadSRegister(src) * std::pow(2.0f, fbits), + FPZero)); + break; + case FCVTZS_xh_fixed: { + double output = + static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteXRegister(dst, FPToInt64(output, FPZero)); + break; + } + case FCVTZS_wh_fixed: { + double output = + static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteWRegister(dst, FPToInt32(output, FPZero)); + break; + } + case FCVTZU_xh_fixed: { + double output = + static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteXRegister(dst, FPToUInt64(output, FPZero)); + break; + } + case FCVTZU_wh_fixed: { + double output = + static_cast<double>(ReadHRegister(src)) * std::pow(2.0, fbits); + WriteWRegister(dst, FPToUInt32(output, FPZero)); + break; + } + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitFPCompare(const Instruction* instr) { + AssertSupportedFPCR(); + + FPTrapFlags trap = DisableTrap; + switch (instr->Mask(FPCompareMask)) { + case FCMPE_h: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_h: + FPCompare(ReadHRegister(instr->GetRn()), + ReadHRegister(instr->GetRm()), + trap); + break; + case FCMPE_s: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_s: + FPCompare(ReadSRegister(instr->GetRn()), + ReadSRegister(instr->GetRm()), + trap); + break; + case FCMPE_d: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_d: + FPCompare(ReadDRegister(instr->GetRn()), + ReadDRegister(instr->GetRm()), + trap); + break; + case FCMPE_h_zero: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_h_zero: + FPCompare(ReadHRegister(instr->GetRn()), SimFloat16(0.0), trap); + break; + case FCMPE_s_zero: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCMP_s_zero: + FPCompare(ReadSRegister(instr->GetRn()), 0.0f, trap); + break; + case FCMPE_d_zero: + trap = EnableTrap; + 
VIXL_FALLTHROUGH(); + case FCMP_d_zero: + FPCompare(ReadDRegister(instr->GetRn()), 0.0, trap); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPConditionalCompare(const Instruction* instr) { + AssertSupportedFPCR(); + + FPTrapFlags trap = DisableTrap; + switch (instr->Mask(FPConditionalCompareMask)) { + case FCCMPE_h: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_h: + if (ConditionPassed(instr->GetCondition())) { + FPCompare(ReadHRegister(instr->GetRn()), + ReadHRegister(instr->GetRm()), + trap); + } else { + ReadNzcv().SetFlags(instr->GetNzcv()); + LogSystemRegister(NZCV); + } + break; + case FCCMPE_s: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_s: + if (ConditionPassed(instr->GetCondition())) { + FPCompare(ReadSRegister(instr->GetRn()), + ReadSRegister(instr->GetRm()), + trap); + } else { + ReadNzcv().SetFlags(instr->GetNzcv()); + LogSystemRegister(NZCV); + } + break; + case FCCMPE_d: + trap = EnableTrap; + VIXL_FALLTHROUGH(); + case FCCMP_d: + if (ConditionPassed(instr->GetCondition())) { + FPCompare(ReadDRegister(instr->GetRn()), + ReadDRegister(instr->GetRm()), + trap); + } else { + ReadNzcv().SetFlags(instr->GetNzcv()); + LogSystemRegister(NZCV); + } + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPConditionalSelect(const Instruction* instr) { + AssertSupportedFPCR(); + + Instr selected; + if (ConditionPassed(instr->GetCondition())) { + selected = instr->GetRn(); + } else { + selected = instr->GetRm(); + } + + switch (instr->Mask(FPConditionalSelectMask)) { + case FCSEL_h: + WriteHRegister(instr->GetRd(), ReadHRegister(selected)); + break; + case FCSEL_s: + WriteSRegister(instr->GetRd(), ReadSRegister(selected)); + break; + case FCSEL_d: + WriteDRegister(instr->GetRd(), ReadDRegister(selected)); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) { + AssertSupportedFPCR(); + + FPRounding fpcr_rounding = 
static_cast<FPRounding>(ReadFpcr().GetRMode()); + VectorFormat vform; + switch (instr->Mask(FPTypeMask)) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case FP64: + vform = kFormatD; + break; + case FP32: + vform = kFormatS; + break; + case FP16: + vform = kFormatH; + break; + } + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + bool inexact_exception = false; + + unsigned fd = instr->GetRd(); + unsigned fn = instr->GetRn(); + + switch (instr->Mask(FPDataProcessing1SourceMask)) { + case FMOV_h: + WriteHRegister(fd, ReadHRegister(fn)); + return; + case FMOV_s: + WriteSRegister(fd, ReadSRegister(fn)); + return; + case FMOV_d: + WriteDRegister(fd, ReadDRegister(fn)); + return; + case FABS_h: + case FABS_s: + case FABS_d: + fabs_(vform, ReadVRegister(fd), ReadVRegister(fn)); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FNEG_h: + case FNEG_s: + case FNEG_d: + fneg(vform, ReadVRegister(fd), ReadVRegister(fn)); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FCVT_ds: + WriteDRegister(fd, FPToDouble(ReadSRegister(fn), ReadDN())); + return; + case FCVT_sd: + WriteSRegister(fd, FPToFloat(ReadDRegister(fn), FPTieEven, ReadDN())); + return; + case FCVT_hs: + WriteHRegister(fd, + Float16ToRawbits( + FPToFloat16(ReadSRegister(fn), FPTieEven, ReadDN()))); + return; + case FCVT_sh: + WriteSRegister(fd, FPToFloat(ReadHRegister(fn), ReadDN())); + return; + case FCVT_dh: + WriteDRegister(fd, FPToDouble(ReadHRegister(fn), ReadDN())); + return; + case FCVT_hd: + WriteHRegister(fd, + Float16ToRawbits( + FPToFloat16(ReadDRegister(fn), FPTieEven, ReadDN()))); + return; + case FSQRT_h: + case FSQRT_s: + case FSQRT_d: + fsqrt(vform, rd, rn); + // Explicitly log the register update whilst we have type information. 
+ LogVRegister(fd, GetPrintRegisterFormatFP(vform)); + return; + case FRINTI_h: + case FRINTI_s: + case FRINTI_d: + break; // Use FPCR rounding mode. + case FRINTX_h: + case FRINTX_s: + case FRINTX_d: + inexact_exception = true; + break; + case FRINTA_h: + case FRINTA_s: + case FRINTA_d: + fpcr_rounding = FPTieAway; + break; + case FRINTM_h: + case FRINTM_s: + case FRINTM_d: + fpcr_rounding = FPNegativeInfinity; + break; + case FRINTN_h: + case FRINTN_s: + case FRINTN_d: + fpcr_rounding = FPTieEven; + break; + case FRINTP_h: + case FRINTP_s: + case FRINTP_d: + fpcr_rounding = FPPositiveInfinity; + break; + case FRINTZ_h: + case FRINTZ_s: + case FRINTZ_d: + fpcr_rounding = FPZero; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + // Only FRINT* instructions fall through the switch above. + frint(vform, rd, rn, fpcr_rounding, inexact_exception); + // Explicitly log the register update whilst we have type information. + LogVRegister(fd, GetPrintRegisterFormatFP(vform)); +} + + +void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) { + AssertSupportedFPCR(); + + VectorFormat vform; + switch (instr->Mask(FPTypeMask)) { + default: + VIXL_UNREACHABLE_OR_FALLTHROUGH(); + case FP64: + vform = kFormatD; + break; + case FP32: + vform = kFormatS; + break; + case FP16: + vform = kFormatH; + break; + } + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(FPDataProcessing2SourceMask)) { + case FADD_h: + case FADD_s: + case FADD_d: + fadd(vform, rd, rn, rm); + break; + case FSUB_h: + case FSUB_s: + case FSUB_d: + fsub(vform, rd, rn, rm); + break; + case FMUL_h: + case FMUL_s: + case FMUL_d: + fmul(vform, rd, rn, rm); + break; + case FNMUL_h: + case FNMUL_s: + case FNMUL_d: + fnmul(vform, rd, rn, rm); + break; + case FDIV_h: + case FDIV_s: + case FDIV_d: + fdiv(vform, rd, rn, rm); + break; + case FMAX_h: + case FMAX_s: + case FMAX_d: + 
fmax(vform, rd, rn, rm); + break; + case FMIN_h: + case FMIN_s: + case FMIN_d: + fmin(vform, rd, rn, rm); + break; + case FMAXNM_h: + case FMAXNM_s: + case FMAXNM_d: + fmaxnm(vform, rd, rn, rm); + break; + case FMINNM_h: + case FMINNM_s: + case FMINNM_d: + fminnm(vform, rd, rn, rm); + break; + default: + VIXL_UNREACHABLE(); + } + // Explicitly log the register update whilst we have type information. + LogVRegister(instr->GetRd(), GetPrintRegisterFormatFP(vform)); +} + + +void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) { + AssertSupportedFPCR(); + + unsigned fd = instr->GetRd(); + unsigned fn = instr->GetRn(); + unsigned fm = instr->GetRm(); + unsigned fa = instr->GetRa(); + + switch (instr->Mask(FPDataProcessing3SourceMask)) { + // fd = fa +/- (fn * fm) + case FMADD_h: + WriteHRegister(fd, + FPMulAdd(ReadHRegister(fa), + ReadHRegister(fn), + ReadHRegister(fm))); + break; + case FMSUB_h: + WriteHRegister(fd, + FPMulAdd(ReadHRegister(fa), + -ReadHRegister(fn), + ReadHRegister(fm))); + break; + case FMADD_s: + WriteSRegister(fd, + FPMulAdd(ReadSRegister(fa), + ReadSRegister(fn), + ReadSRegister(fm))); + break; + case FMSUB_s: + WriteSRegister(fd, + FPMulAdd(ReadSRegister(fa), + -ReadSRegister(fn), + ReadSRegister(fm))); + break; + case FMADD_d: + WriteDRegister(fd, + FPMulAdd(ReadDRegister(fa), + ReadDRegister(fn), + ReadDRegister(fm))); + break; + case FMSUB_d: + WriteDRegister(fd, + FPMulAdd(ReadDRegister(fa), + -ReadDRegister(fn), + ReadDRegister(fm))); + break; + // Negated variants of the above. 
+ case FNMADD_h: + WriteHRegister(fd, + FPMulAdd(-ReadHRegister(fa), + -ReadHRegister(fn), + ReadHRegister(fm))); + break; + case FNMSUB_h: + WriteHRegister(fd, + FPMulAdd(-ReadHRegister(fa), + ReadHRegister(fn), + ReadHRegister(fm))); + break; + case FNMADD_s: + WriteSRegister(fd, + FPMulAdd(-ReadSRegister(fa), + -ReadSRegister(fn), + ReadSRegister(fm))); + break; + case FNMSUB_s: + WriteSRegister(fd, + FPMulAdd(-ReadSRegister(fa), + ReadSRegister(fn), + ReadSRegister(fm))); + break; + case FNMADD_d: + WriteDRegister(fd, + FPMulAdd(-ReadDRegister(fa), + -ReadDRegister(fn), + ReadDRegister(fm))); + break; + case FNMSUB_d: + WriteDRegister(fd, + FPMulAdd(-ReadDRegister(fa), + ReadDRegister(fn), + ReadDRegister(fm))); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +bool Simulator::FPProcessNaNs(const Instruction* instr) { + unsigned fd = instr->GetRd(); + unsigned fn = instr->GetRn(); + unsigned fm = instr->GetRm(); + bool done = false; + + if (instr->Mask(FP64) == FP64) { + double result = FPProcessNaNs(ReadDRegister(fn), ReadDRegister(fm)); + if (IsNaN(result)) { + WriteDRegister(fd, result); + done = true; + } + } else if (instr->Mask(FP32) == FP32) { + float result = FPProcessNaNs(ReadSRegister(fn), ReadSRegister(fm)); + if (IsNaN(result)) { + WriteSRegister(fd, result); + done = true; + } + } else { + VIXL_ASSERT(instr->Mask(FP16) == FP16); + VIXL_UNIMPLEMENTED(); + } + + return done; +} + + +void Simulator::SysOp_W(int op, int64_t val) { + switch (op) { + case IVAU: + case CVAC: + case CVAU: + case CVAP: + case CIVAC: { + // Perform a dummy memory access to ensure that we have read access + // to the specified address. + volatile uint8_t y = Memory::Read<uint8_t>(val); + USE(y); + // TODO: Implement "case ZVA:". 
+ break; + } + default: + VIXL_UNIMPLEMENTED(); + } +} + + +// clang-format off +#define PAUTH_SYSTEM_MODES(V) \ + V(A1716, 17, ReadXRegister(16), kPACKeyIA) \ + V(B1716, 17, ReadXRegister(16), kPACKeyIB) \ + V(AZ, 30, 0x00000000, kPACKeyIA) \ + V(BZ, 30, 0x00000000, kPACKeyIB) \ + V(ASP, 30, ReadXRegister(31, Reg31IsStackPointer), kPACKeyIA) \ + V(BSP, 30, ReadXRegister(31, Reg31IsStackPointer), kPACKeyIB) +// clang-format on + + +void Simulator::VisitSystem(const Instruction* instr) { + // Some system instructions hijack their Op and Cp fields to represent a + // range of immediates instead of indicating a different instruction. This + // makes the decoding tricky. + if (instr->GetInstructionBits() == XPACLRI) { + WriteXRegister(30, StripPAC(ReadXRegister(30), kInstructionPointer)); + } else if (instr->Mask(SystemPStateFMask) == SystemPStateFixed) { + switch (instr->Mask(SystemPStateMask)) { + case CFINV: + ReadNzcv().SetC(!ReadC()); + break; + case AXFLAG: + ReadNzcv().SetN(0); + ReadNzcv().SetZ(ReadNzcv().GetZ() | ReadNzcv().GetV()); + ReadNzcv().SetC(ReadNzcv().GetC() & ~ReadNzcv().GetV()); + ReadNzcv().SetV(0); + break; + case XAFLAG: { + // Can't set the flags in place due to the logical dependencies. + uint32_t n = (~ReadNzcv().GetC() & ~ReadNzcv().GetZ()) & 1; + uint32_t z = ReadNzcv().GetZ() & ReadNzcv().GetC(); + uint32_t c = ReadNzcv().GetC() | ReadNzcv().GetZ(); + uint32_t v = ~ReadNzcv().GetC() & ReadNzcv().GetZ(); + ReadNzcv().SetN(n); + ReadNzcv().SetZ(z); + ReadNzcv().SetC(c); + ReadNzcv().SetV(v); + break; + } + } + } else if (instr->Mask(SystemPAuthFMask) == SystemPAuthFixed) { + // Check BType allows PACI[AB]SP instructions. 
+ if (PcIsInGuardedPage()) { + Instr i = instr->Mask(SystemPAuthMask); + if ((i == PACIASP) || (i == PACIBSP)) { + switch (ReadBType()) { + case DefaultBType: + VIXL_ABORT_WITH_MSG("Executing PACIXSP with wrong BType."); + break; + case BranchFromGuardedNotToIP: + // TODO: This case depends on the value of SCTLR_EL1.BT0, which we + // assume here to be zero. This allows execution of PACI[AB]SP when + // BTYPE is BranchFromGuardedNotToIP (0b11). + case BranchFromUnguardedOrToIP: + case BranchAndLink: + break; + } + } + } + + switch (instr->Mask(SystemPAuthMask)) { +#define DEFINE_PAUTH_FUNCS(SUFFIX, DST, MOD, KEY) \ + case PACI##SUFFIX: \ + WriteXRegister(DST, \ + AddPAC(ReadXRegister(DST), MOD, KEY, kInstructionPointer)); \ + break; \ + case AUTI##SUFFIX: \ + WriteXRegister(DST, \ + AuthPAC(ReadXRegister(DST), \ + MOD, \ + KEY, \ + kInstructionPointer)); \ + break; + + PAUTH_SYSTEM_MODES(DEFINE_PAUTH_FUNCS) +#undef DEFINE_PAUTH_FUNCS + } + } else if (instr->Mask(SystemExclusiveMonitorFMask) == + SystemExclusiveMonitorFixed) { + VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX); + switch (instr->Mask(SystemExclusiveMonitorMask)) { + case CLREX: { + PrintExclusiveAccessWarning(); + ClearLocalMonitor(); + break; + } + } + } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { + switch (instr->Mask(SystemSysRegMask)) { + case MRS: { + switch (instr->GetImmSystemRegister()) { + case NZCV: + WriteXRegister(instr->GetRt(), ReadNzcv().GetRawValue()); + break; + case FPCR: + WriteXRegister(instr->GetRt(), ReadFpcr().GetRawValue()); + break; + default: + VIXL_UNIMPLEMENTED(); + } + break; + } + case MSR: { + switch (instr->GetImmSystemRegister()) { + case NZCV: + ReadNzcv().SetRawValue(ReadWRegister(instr->GetRt())); + LogSystemRegister(NZCV); + break; + case FPCR: + ReadFpcr().SetRawValue(ReadWRegister(instr->GetRt())); + LogSystemRegister(FPCR); + break; + default: + VIXL_UNIMPLEMENTED(); + } + break; + } + } + } else if (instr->Mask(SystemHintFMask) 
== SystemHintFixed) { + VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT); + switch (instr->GetImmHint()) { + case NOP: + case ESB: + case CSDB: + case BTI_jc: + break; + case BTI: + if (PcIsInGuardedPage() && (ReadBType() != DefaultBType)) { + VIXL_ABORT_WITH_MSG("Executing BTI with wrong BType."); + } + break; + case BTI_c: + if (PcIsInGuardedPage() && (ReadBType() == BranchFromGuardedNotToIP)) { + VIXL_ABORT_WITH_MSG("Executing BTI c with wrong BType."); + } + break; + case BTI_j: + if (PcIsInGuardedPage() && (ReadBType() == BranchAndLink)) { + VIXL_ABORT_WITH_MSG("Executing BTI j with wrong BType."); + } + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) { + __sync_synchronize(); + } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) { + switch (instr->Mask(SystemSysMask)) { + case SYS: + SysOp_W(instr->GetSysOp(), ReadXRegister(instr->GetRt())); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitException(const Instruction* instr) { + switch (instr->Mask(ExceptionMask)) { + case HLT: + switch (instr->GetImmException()) { + case kUnreachableOpcode: + DoUnreachable(instr); + return; + case kTraceOpcode: + DoTrace(instr); + return; + case kLogOpcode: + DoLog(instr); + return; + case kPrintfOpcode: + DoPrintf(instr); + return; + case kRuntimeCallOpcode: + DoRuntimeCall(instr); + return; + case kSetCPUFeaturesOpcode: + case kEnableCPUFeaturesOpcode: + case kDisableCPUFeaturesOpcode: + DoConfigureCPUFeatures(instr); + return; + case kSaveCPUFeaturesOpcode: + DoSaveCPUFeatures(instr); + return; + case kRestoreCPUFeaturesOpcode: + DoRestoreCPUFeatures(instr); + return; + default: + HostBreakpoint(); + return; + } + case BRK: + HostBreakpoint(); + return; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitCrypto2RegSHA(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitCrypto3RegSHA(const 
Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitCryptoAES(const Instruction* instr) { + VisitUnimplemented(instr); +} + + +void Simulator::VisitNEON2RegMisc(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + + static const NEONFormatMap map_lp = + {{23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}}; + VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp); + + static const NEONFormatMap map_fcvtl = {{22}, {NF_4S, NF_2D}}; + VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl); + + static const NEONFormatMap map_fcvtn = {{22, 30}, + {NF_4H, NF_8H, NF_2S, NF_4S}}; + VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. 
+ switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_REV64: + rev64(vf, rd, rn); + break; + case NEON_REV32: + rev32(vf, rd, rn); + break; + case NEON_REV16: + rev16(vf, rd, rn); + break; + case NEON_SUQADD: + suqadd(vf, rd, rn); + break; + case NEON_USQADD: + usqadd(vf, rd, rn); + break; + case NEON_CLS: + cls(vf, rd, rn); + break; + case NEON_CLZ: + clz(vf, rd, rn); + break; + case NEON_CNT: + cnt(vf, rd, rn); + break; + case NEON_SQABS: + abs(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_SQNEG: + neg(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_CMGT_zero: + cmp(vf, rd, rn, 0, gt); + break; + case NEON_CMGE_zero: + cmp(vf, rd, rn, 0, ge); + break; + case NEON_CMEQ_zero: + cmp(vf, rd, rn, 0, eq); + break; + case NEON_CMLE_zero: + cmp(vf, rd, rn, 0, le); + break; + case NEON_CMLT_zero: + cmp(vf, rd, rn, 0, lt); + break; + case NEON_ABS: + abs(vf, rd, rn); + break; + case NEON_NEG: + neg(vf, rd, rn); + break; + case NEON_SADDLP: + saddlp(vf_lp, rd, rn); + break; + case NEON_UADDLP: + uaddlp(vf_lp, rd, rn); + break; + case NEON_SADALP: + sadalp(vf_lp, rd, rn); + break; + case NEON_UADALP: + uadalp(vf_lp, rd, rn); + break; + case NEON_RBIT_NOT: + vf = nfd.GetVectorFormat(nfd.LogicalFormatMap()); + switch (instr->GetFPType()) { + case 0: + not_(vf, rd, rn); + break; + case 1: + rbit(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + break; + } + } else { + VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap()); + FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode()); + bool inexact_exception = false; + + // These instructions all use a one bit size field, except XTN, SQXTUN, + // SHLL, SQXTN and UQXTN, which use a two bit size field. 
+ switch (instr->Mask(NEON2RegMiscFPMask)) { + case NEON_FABS: + fabs_(fpf, rd, rn); + return; + case NEON_FNEG: + fneg(fpf, rd, rn); + return; + case NEON_FSQRT: + fsqrt(fpf, rd, rn); + return; + case NEON_FCVTL: + if (instr->Mask(NEON_Q)) { + fcvtl2(vf_fcvtl, rd, rn); + } else { + fcvtl(vf_fcvtl, rd, rn); + } + return; + case NEON_FCVTN: + if (instr->Mask(NEON_Q)) { + fcvtn2(vf_fcvtn, rd, rn); + } else { + fcvtn(vf_fcvtn, rd, rn); + } + return; + case NEON_FCVTXN: + if (instr->Mask(NEON_Q)) { + fcvtxn2(vf_fcvtn, rd, rn); + } else { + fcvtxn(vf_fcvtn, rd, rn); + } + return; + + // The following instructions break from the switch statement, rather + // than return. + case NEON_FRINTI: + break; // Use FPCR rounding mode. + case NEON_FRINTX: + inexact_exception = true; + break; + case NEON_FRINTA: + fpcr_rounding = FPTieAway; + break; + case NEON_FRINTM: + fpcr_rounding = FPNegativeInfinity; + break; + case NEON_FRINTN: + fpcr_rounding = FPTieEven; + break; + case NEON_FRINTP: + fpcr_rounding = FPPositiveInfinity; + break; + case NEON_FRINTZ: + fpcr_rounding = FPZero; + break; + + case NEON_FCVTNS: + fcvts(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTNU: + fcvtu(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTPS: + fcvts(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTPU: + fcvtu(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTMS: + fcvts(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTMU: + fcvtu(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTZS: + fcvts(fpf, rd, rn, FPZero); + return; + case NEON_FCVTZU: + fcvtu(fpf, rd, rn, FPZero); + return; + case NEON_FCVTAS: + fcvts(fpf, rd, rn, FPTieAway); + return; + case NEON_FCVTAU: + fcvtu(fpf, rd, rn, FPTieAway); + return; + case NEON_SCVTF: + scvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_UCVTF: + ucvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_URSQRTE: + ursqrte(fpf, rd, rn); + return; + case NEON_URECPE: + urecpe(fpf, rd, rn); + return; 
+ case NEON_FRSQRTE: + frsqrte(fpf, rd, rn); + return; + case NEON_FRECPE: + frecpe(fpf, rd, rn, fpcr_rounding); + return; + case NEON_FCMGT_zero: + fcmp_zero(fpf, rd, rn, gt); + return; + case NEON_FCMGE_zero: + fcmp_zero(fpf, rd, rn, ge); + return; + case NEON_FCMEQ_zero: + fcmp_zero(fpf, rd, rn, eq); + return; + case NEON_FCMLE_zero: + fcmp_zero(fpf, rd, rn, le); + return; + case NEON_FCMLT_zero: + fcmp_zero(fpf, rd, rn, lt); + return; + default: + if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) && + (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) { + switch (instr->Mask(NEON2RegMiscMask)) { + case NEON_XTN: + xtn(vf, rd, rn); + return; + case NEON_SQXTN: + sqxtn(vf, rd, rn); + return; + case NEON_UQXTN: + uqxtn(vf, rd, rn); + return; + case NEON_SQXTUN: + sqxtun(vf, rd, rn); + return; + case NEON_SHLL: + vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + if (instr->Mask(NEON_Q)) { + shll2(vf, rd, rn); + } else { + shll(vf, rd, rn); + } + return; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VIXL_UNIMPLEMENTED(); + } + } + + // Only FRINT* instructions fall through the switch above. 
+ frint(fpf, rd, rn, fpcr_rounding, inexact_exception); + } +} + + +void Simulator::VisitNEON2RegMiscFP16(const Instruction* instr) { + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + NEONFormatDecoder nfd(instr); + VectorFormat fpf = nfd.GetVectorFormat(&map_half); + + FPRounding fpcr_rounding = static_cast<FPRounding>(ReadFpcr().GetRMode()); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + switch (instr->Mask(NEON2RegMiscFP16Mask)) { + case NEON_SCVTF_H: + scvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_UCVTF_H: + ucvtf(fpf, rd, rn, 0, fpcr_rounding); + return; + case NEON_FCVTNS_H: + fcvts(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTNU_H: + fcvtu(fpf, rd, rn, FPTieEven); + return; + case NEON_FCVTPS_H: + fcvts(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTPU_H: + fcvtu(fpf, rd, rn, FPPositiveInfinity); + return; + case NEON_FCVTMS_H: + fcvts(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTMU_H: + fcvtu(fpf, rd, rn, FPNegativeInfinity); + return; + case NEON_FCVTZS_H: + fcvts(fpf, rd, rn, FPZero); + return; + case NEON_FCVTZU_H: + fcvtu(fpf, rd, rn, FPZero); + return; + case NEON_FCVTAS_H: + fcvts(fpf, rd, rn, FPTieAway); + return; + case NEON_FCVTAU_H: + fcvtu(fpf, rd, rn, FPTieAway); + return; + case NEON_FRINTI_H: + frint(fpf, rd, rn, fpcr_rounding, false); + return; + case NEON_FRINTX_H: + frint(fpf, rd, rn, fpcr_rounding, true); + return; + case NEON_FRINTA_H: + frint(fpf, rd, rn, FPTieAway, false); + return; + case NEON_FRINTM_H: + frint(fpf, rd, rn, FPNegativeInfinity, false); + return; + case NEON_FRINTN_H: + frint(fpf, rd, rn, FPTieEven, false); + return; + case NEON_FRINTP_H: + frint(fpf, rd, rn, FPPositiveInfinity, false); + return; + case NEON_FRINTZ_H: + frint(fpf, rd, rn, FPZero, false); + return; + case NEON_FABS_H: + fabs_(fpf, rd, rn); + return; + case NEON_FNEG_H: + fneg(fpf, rd, rn); + return; + case NEON_FSQRT_H: + fsqrt(fpf, rd, rn); 
+ return; + case NEON_FRSQRTE_H: + frsqrte(fpf, rd, rn); + return; + case NEON_FRECPE_H: + frecpe(fpf, rd, rn, fpcr_rounding); + return; + case NEON_FCMGT_H_zero: + fcmp_zero(fpf, rd, rn, gt); + return; + case NEON_FCMGE_H_zero: + fcmp_zero(fpf, rd, rn, ge); + return; + case NEON_FCMEQ_H_zero: + fcmp_zero(fpf, rd, rn, eq); + return; + case NEON_FCMLE_H_zero: + fcmp_zero(fpf, rd, rn, le); + return; + case NEON_FCMLT_H_zero: + fcmp_zero(fpf, rd, rn, lt); + return; + default: + VIXL_UNIMPLEMENTED(); + return; + } +} + + +void Simulator::VisitNEON3Same(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap()); + switch (instr->Mask(NEON3SameLogicalMask)) { + case NEON_AND: + and_(vf, rd, rn, rm); + break; + case NEON_ORR: + orr(vf, rd, rn, rm); + break; + case NEON_ORN: + orn(vf, rd, rn, rm); + break; + case NEON_EOR: + eor(vf, rd, rn, rm); + break; + case NEON_BIC: + bic(vf, rd, rn, rm); + break; + case NEON_BIF: + bif(vf, rd, rn, rm); + break; + case NEON_BIT: + bit(vf, rd, rn, rm); + break; + case NEON_BSL: + bsl(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) { + VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + switch (instr->Mask(NEON3SameFPMask)) { + case NEON_FADD: + fadd(vf, rd, rn, rm); + break; + case NEON_FSUB: + fsub(vf, rd, rn, rm); + break; + case NEON_FMUL: + fmul(vf, rd, rn, rm); + break; + case NEON_FDIV: + fdiv(vf, rd, rn, rm); + break; + case NEON_FMAX: + fmax(vf, rd, rn, rm); + break; + case NEON_FMIN: + fmin(vf, rd, rn, rm); + break; + case NEON_FMAXNM: + fmaxnm(vf, rd, rn, rm); + break; + case NEON_FMINNM: + fminnm(vf, rd, rn, rm); + break; + case NEON_FMLA: + fmla(vf, 
rd, rn, rm); + break; + case NEON_FMLS: + fmls(vf, rd, rn, rm); + break; + case NEON_FMULX: + fmulx(vf, rd, rn, rm); + break; + case NEON_FACGE: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGT: + fabscmp(vf, rd, rn, rm, gt); + break; + case NEON_FCMEQ: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FRECPS: + frecps(vf, rd, rn, rm); + break; + case NEON_FRSQRTS: + frsqrts(vf, rd, rn, rm); + break; + case NEON_FABD: + fabd(vf, rd, rn, rm); + break; + case NEON_FADDP: + faddp(vf, rd, rn, rm); + break; + case NEON_FMAXP: + fmaxp(vf, rd, rn, rm); + break; + case NEON_FMAXNMP: + fmaxnmp(vf, rd, rn, rm); + break; + case NEON_FMINP: + fminp(vf, rd, rn, rm); + break; + case NEON_FMINNMP: + fminnmp(vf, rd, rn, rm); + break; + default: + // FMLAL{2} and FMLSL{2} have special-case encodings. + switch (instr->Mask(NEON3SameFHMMask)) { + case NEON_FMLAL: + fmlal(vf, rd, rn, rm); + break; + case NEON_FMLAL2: + fmlal2(vf, rd, rn, rm); + break; + case NEON_FMLSL: + fmlsl(vf, rd, rn, rm); + break; + case NEON_FMLSL2: + fmlsl2(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + switch (instr->Mask(NEON3SameMask)) { + case NEON_ADD: + add(vf, rd, rn, rm); + break; + case NEON_ADDP: + addp(vf, rd, rn, rm); + break; + case NEON_CMEQ: + cmp(vf, rd, rn, rm, eq); + break; + case NEON_CMGE: + cmp(vf, rd, rn, rm, ge); + break; + case NEON_CMGT: + cmp(vf, rd, rn, rm, gt); + break; + case NEON_CMHI: + cmp(vf, rd, rn, rm, hi); + break; + case NEON_CMHS: + cmp(vf, rd, rn, rm, hs); + break; + case NEON_CMTST: + cmptst(vf, rd, rn, rm); + break; + case NEON_MLS: + mls(vf, rd, rn, rm); + break; + case NEON_MLA: + mla(vf, rd, rn, rm); + break; + case NEON_MUL: + mul(vf, rd, rn, rm); + break; + case NEON_PMUL: + pmul(vf, rd, rn, rm); + break; + case NEON_SMAX: + smax(vf, rd, rn, rm); + break; + case 
NEON_SMAXP: + smaxp(vf, rd, rn, rm); + break; + case NEON_SMIN: + smin(vf, rd, rn, rm); + break; + case NEON_SMINP: + sminp(vf, rd, rn, rm); + break; + case NEON_SUB: + sub(vf, rd, rn, rm); + break; + case NEON_UMAX: + umax(vf, rd, rn, rm); + break; + case NEON_UMAXP: + umaxp(vf, rd, rn, rm); + break; + case NEON_UMIN: + umin(vf, rd, rn, rm); + break; + case NEON_UMINP: + uminp(vf, rd, rn, rm); + break; + case NEON_SSHL: + sshl(vf, rd, rn, rm); + break; + case NEON_USHL: + ushl(vf, rd, rn, rm); + break; + case NEON_SABD: + absdiff(vf, rd, rn, rm, true); + break; + case NEON_UABD: + absdiff(vf, rd, rn, rm, false); + break; + case NEON_SABA: + saba(vf, rd, rn, rm); + break; + case NEON_UABA: + uaba(vf, rd, rn, rm); + break; + case NEON_UQADD: + add(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQADD: + add(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSUB: + sub(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSUB: + sub(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_SQDMULH: + sqdmulh(vf, rd, rn, rm); + break; + case NEON_SQRDMULH: + sqrdmulh(vf, rd, rn, rm); + break; + case NEON_UQSHL: + ushl(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSHL: + sshl(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_URSHL: + ushl(vf, rd, rn, rm).Round(vf); + break; + case NEON_SRSHL: + sshl(vf, rd, rn, rm).Round(vf); + break; + case NEON_UQRSHL: + ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + break; + case NEON_UHADD: + add(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_URHADD: + add(vf, rd, rn, rm).Uhalve(vf).Round(vf); + break; + case NEON_SHADD: + add(vf, rd, rn, rm).Halve(vf); + break; + case NEON_SRHADD: + add(vf, rd, rn, rm).Halve(vf).Round(vf); + break; + case NEON_UHSUB: + sub(vf, rd, rn, rm).Uhalve(vf); + break; + case NEON_SHSUB: + sub(vf, rd, rn, rm).Halve(vf); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } 
+} + + +void Simulator::VisitNEON3SameFP16(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + VectorFormat vf = nfd.GetVectorFormat(nfd.FP16FormatMap()); + switch (instr->Mask(NEON3SameFP16Mask)) { +#define SIM_FUNC(A, B) \ + case NEON_##A##_H: \ + B(vf, rd, rn, rm); \ + break; + SIM_FUNC(FMAXNM, fmaxnm); + SIM_FUNC(FMLA, fmla); + SIM_FUNC(FADD, fadd); + SIM_FUNC(FMULX, fmulx); + SIM_FUNC(FMAX, fmax); + SIM_FUNC(FRECPS, frecps); + SIM_FUNC(FMINNM, fminnm); + SIM_FUNC(FMLS, fmls); + SIM_FUNC(FSUB, fsub); + SIM_FUNC(FMIN, fmin); + SIM_FUNC(FRSQRTS, frsqrts); + SIM_FUNC(FMAXNMP, fmaxnmp); + SIM_FUNC(FADDP, faddp); + SIM_FUNC(FMUL, fmul); + SIM_FUNC(FMAXP, fmaxp); + SIM_FUNC(FDIV, fdiv); + SIM_FUNC(FMINNMP, fminnmp); + SIM_FUNC(FABD, fabd); + SIM_FUNC(FMINP, fminp); +#undef SIM_FUNC + case NEON_FCMEQ_H: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE_H: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGE_H: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT_H: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FACGT_H: + fabscmp(vf, rd, rn, rm, gt); + break; + default: + VIXL_UNIMPLEMENTED(); + break; + } +} + +void Simulator::VisitNEON3SameExtra(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + int rot = 0; + VectorFormat vf = nfd.GetVectorFormat(); + if (instr->Mask(NEON3SameExtraFCMLAMask) == NEON_FCMLA) { + rot = instr->GetImmRotFcmlaVec(); + fcmla(vf, rd, rn, rm, rot); + } else if (instr->Mask(NEON3SameExtraFCADDMask) == NEON_FCADD) { + rot = instr->GetImmRotFcadd(); + fcadd(vf, rd, rn, rm, rot); + } else { + switch (instr->Mask(NEON3SameExtraMask)) { + case NEON_SDOT: + sdot(vf, rd, rn, rm); + break; + case 
NEON_SQRDMLAH: + sqrdmlah(vf, rd, rn, rm); + break; + case NEON_UDOT: + udot(vf, rd, rn, rm); + break; + case NEON_SQRDMLSH: + sqrdmlsh(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + break; + } + } +} + + +void Simulator::VisitNEON3Different(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(NEON3DifferentMask)) { + case NEON_PMULL: + pmull(vf_l, rd, rn, rm); + break; + case NEON_PMULL2: + pmull2(vf_l, rd, rn, rm); + break; + case NEON_UADDL: + uaddl(vf_l, rd, rn, rm); + break; + case NEON_UADDL2: + uaddl2(vf_l, rd, rn, rm); + break; + case NEON_SADDL: + saddl(vf_l, rd, rn, rm); + break; + case NEON_SADDL2: + saddl2(vf_l, rd, rn, rm); + break; + case NEON_USUBL: + usubl(vf_l, rd, rn, rm); + break; + case NEON_USUBL2: + usubl2(vf_l, rd, rn, rm); + break; + case NEON_SSUBL: + ssubl(vf_l, rd, rn, rm); + break; + case NEON_SSUBL2: + ssubl2(vf_l, rd, rn, rm); + break; + case NEON_SABAL: + sabal(vf_l, rd, rn, rm); + break; + case NEON_SABAL2: + sabal2(vf_l, rd, rn, rm); + break; + case NEON_UABAL: + uabal(vf_l, rd, rn, rm); + break; + case NEON_UABAL2: + uabal2(vf_l, rd, rn, rm); + break; + case NEON_SABDL: + sabdl(vf_l, rd, rn, rm); + break; + case NEON_SABDL2: + sabdl2(vf_l, rd, rn, rm); + break; + case NEON_UABDL: + uabdl(vf_l, rd, rn, rm); + break; + case NEON_UABDL2: + uabdl2(vf_l, rd, rn, rm); + break; + case NEON_SMLAL: + smlal(vf_l, rd, rn, rm); + break; + case NEON_SMLAL2: + smlal2(vf_l, rd, rn, rm); + break; + case NEON_UMLAL: + umlal(vf_l, rd, rn, rm); + break; + case NEON_UMLAL2: + umlal2(vf_l, rd, rn, rm); + break; + case NEON_SMLSL: + smlsl(vf_l, rd, rn, rm); + break; + case NEON_SMLSL2: + smlsl2(vf_l, rd, rn, rm); + break; + case NEON_UMLSL: 
+ umlsl(vf_l, rd, rn, rm); + break; + case NEON_UMLSL2: + umlsl2(vf_l, rd, rn, rm); + break; + case NEON_SMULL: + smull(vf_l, rd, rn, rm); + break; + case NEON_SMULL2: + smull2(vf_l, rd, rn, rm); + break; + case NEON_UMULL: + umull(vf_l, rd, rn, rm); + break; + case NEON_UMULL2: + umull2(vf_l, rd, rn, rm); + break; + case NEON_SQDMLAL: + sqdmlal(vf_l, rd, rn, rm); + break; + case NEON_SQDMLAL2: + sqdmlal2(vf_l, rd, rn, rm); + break; + case NEON_SQDMLSL: + sqdmlsl(vf_l, rd, rn, rm); + break; + case NEON_SQDMLSL2: + sqdmlsl2(vf_l, rd, rn, rm); + break; + case NEON_SQDMULL: + sqdmull(vf_l, rd, rn, rm); + break; + case NEON_SQDMULL2: + sqdmull2(vf_l, rd, rn, rm); + break; + case NEON_UADDW: + uaddw(vf_l, rd, rn, rm); + break; + case NEON_UADDW2: + uaddw2(vf_l, rd, rn, rm); + break; + case NEON_SADDW: + saddw(vf_l, rd, rn, rm); + break; + case NEON_SADDW2: + saddw2(vf_l, rd, rn, rm); + break; + case NEON_USUBW: + usubw(vf_l, rd, rn, rm); + break; + case NEON_USUBW2: + usubw2(vf_l, rd, rn, rm); + break; + case NEON_SSUBW: + ssubw(vf_l, rd, rn, rm); + break; + case NEON_SSUBW2: + ssubw2(vf_l, rd, rn, rm); + break; + case NEON_ADDHN: + addhn(vf, rd, rn, rm); + break; + case NEON_ADDHN2: + addhn2(vf, rd, rn, rm); + break; + case NEON_RADDHN: + raddhn(vf, rd, rn, rm); + break; + case NEON_RADDHN2: + raddhn2(vf, rd, rn, rm); + break; + case NEON_SUBHN: + subhn(vf, rd, rn, rm); + break; + case NEON_SUBHN2: + subhn2(vf, rd, rn, rm); + break; + case NEON_RSUBHN: + rsubhn(vf, rd, rn, rm); + break; + case NEON_RSUBHN2: + rsubhn2(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONAcrossLanes(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + if (instr->Mask(NEONAcrossLanesFP16FMask) == NEONAcrossLanesFP16Fixed) { + VectorFormat vf = 
nfd.GetVectorFormat(&map_half); + switch (instr->Mask(NEONAcrossLanesFP16Mask)) { + case NEON_FMAXV_H: + fmaxv(vf, rd, rn); + break; + case NEON_FMINV_H: + fminv(vf, rd, rn); + break; + case NEON_FMAXNMV_H: + fmaxnmv(vf, rd, rn); + break; + case NEON_FMINNMV_H: + fminnmv(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { + // The input operand's VectorFormat is passed for these instructions. + VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONAcrossLanesFPMask)) { + case NEON_FMAXV: + fmaxv(vf, rd, rn); + break; + case NEON_FMINV: + fminv(vf, rd, rn); + break; + case NEON_FMAXNMV: + fmaxnmv(vf, rd, rn); + break; + case NEON_FMINNMV: + fminnmv(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + VectorFormat vf = nfd.GetVectorFormat(); + + switch (instr->Mask(NEONAcrossLanesMask)) { + case NEON_ADDV: + addv(vf, rd, rn); + break; + case NEON_SMAXV: + smaxv(vf, rd, rn); + break; + case NEON_SMINV: + sminv(vf, rd, rn); + break; + case NEON_UMAXV: + umaxv(vf, rd, rn); + break; + case NEON_UMINV: + uminv(vf, rd, rn); + break; + case NEON_SADDLV: + saddlv(vf, rd, rn); + break; + case NEON_UADDLV: + uaddlv(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } +} + + +void Simulator::VisitNEONByIndexedElement(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + static const NEONFormatMap map_half = {{30}, {NF_4H, NF_8H}}; + VectorFormat vf_r = nfd.GetVectorFormat(); + VectorFormat vf_half = nfd.GetVectorFormat(&map_half); + VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap()); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + ByElementOp Op = NULL; + + int rm_reg = instr->GetRm(); + int rm_low_reg = instr->GetRmLow16(); + int index = (instr->GetNEONH() << 1) | instr->GetNEONL(); + int index_hlm = (index << 1) | instr->GetNEONM(); + + switch 
(instr->Mask(NEONByIndexedElementFPLongMask)) { + // These are oddballs and are best handled as special cases. + // - Rm is encoded with only 4 bits (and must be in the lower 16 registers). + // - The index is always H:L:M. + case NEON_FMLAL_H_byelement: + fmlal(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm); + return; + case NEON_FMLAL2_H_byelement: + fmlal2(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm); + return; + case NEON_FMLSL_H_byelement: + fmlsl(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm); + return; + case NEON_FMLSL2_H_byelement: + fmlsl2(vf_r, rd, rn, ReadVRegister(rm_low_reg), index_hlm); + return; + } + + if (instr->GetNEONSize() == 1) { + rm_reg = rm_low_reg; + index = index_hlm; + } + + switch (instr->Mask(NEONByIndexedElementMask)) { + case NEON_MUL_byelement: + Op = &Simulator::mul; + vf = vf_r; + break; + case NEON_MLA_byelement: + Op = &Simulator::mla; + vf = vf_r; + break; + case NEON_MLS_byelement: + Op = &Simulator::mls; + vf = vf_r; + break; + case NEON_SQDMULH_byelement: + Op = &Simulator::sqdmulh; + vf = vf_r; + break; + case NEON_SQRDMULH_byelement: + Op = &Simulator::sqrdmulh; + vf = vf_r; + break; + case NEON_SDOT_byelement: + Op = &Simulator::sdot; + vf = vf_r; + break; + case NEON_SQRDMLAH_byelement: + Op = &Simulator::sqrdmlah; + vf = vf_r; + break; + case NEON_UDOT_byelement: + Op = &Simulator::udot; + vf = vf_r; + break; + case NEON_SQRDMLSH_byelement: + Op = &Simulator::sqrdmlsh; + vf = vf_r; + break; + case NEON_SMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smull2; + } else { + Op = &Simulator::smull; + } + break; + case NEON_UMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umull2; + } else { + Op = &Simulator::umull; + } + break; + case NEON_SMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smlal2; + } else { + Op = &Simulator::smlal; + } + break; + case NEON_UMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlal2; + } else { + Op = 
&Simulator::umlal; + } + break; + case NEON_SMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::smlsl2; + } else { + Op = &Simulator::smlsl; + } + break; + case NEON_UMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::umlsl2; + } else { + Op = &Simulator::umlsl; + } + break; + case NEON_SQDMULL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmull2; + } else { + Op = &Simulator::sqdmull; + } + break; + case NEON_SQDMLAL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlal2; + } else { + Op = &Simulator::sqdmlal; + } + break; + case NEON_SQDMLSL_byelement: + if (instr->Mask(NEON_Q)) { + Op = &Simulator::sqdmlsl2; + } else { + Op = &Simulator::sqdmlsl; + } + break; + default: + index = instr->GetNEONH(); + if (instr->GetFPType() == 0) { + rm_reg &= 0xf; + index = (index << 2) | (instr->GetNEONL() << 1) | instr->GetNEONM(); + } else if ((instr->GetFPType() & 1) == 0) { + index = (index << 1) | instr->GetNEONL(); + } + + vf = nfd.GetVectorFormat(nfd.FPFormatMap()); + + switch (instr->Mask(NEONByIndexedElementFPMask)) { + case NEON_FMUL_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMUL_byelement: + Op = &Simulator::fmul; + break; + case NEON_FMLA_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMLA_byelement: + Op = &Simulator::fmla; + break; + case NEON_FMLS_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMLS_byelement: + Op = &Simulator::fmls; + break; + case NEON_FMULX_H_byelement: + vf = vf_half; + VIXL_FALLTHROUGH(); + case NEON_FMULX_byelement: + Op = &Simulator::fmulx; + break; + default: + if (instr->GetNEONSize() == 2) { + index = instr->GetNEONH(); + } else { + index = (instr->GetNEONH() << 1) | instr->GetNEONL(); + } + switch (instr->Mask(NEONByIndexedElementFPComplexMask)) { + case NEON_FCMLA_byelement: + vf = vf_r; + fcmla(vf, + rd, + rn, + ReadVRegister(instr->GetRm()), + index, + instr->GetImmRotFcmlaSca()); + return; + default: + VIXL_UNIMPLEMENTED(); + 
} + } + } + + (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index); +} + + +void Simulator::VisitNEONCopy(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + int imm5 = instr->GetImmNEON5(); + int tz = CountTrailingZeros(imm5, 32); + int reg_index = imm5 >> (tz + 1); + + if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) { + int imm4 = instr->GetImmNEON4(); + int rn_index = imm4 >> tz; + ins_element(vf, rd, reg_index, rn, rn_index); + } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) { + ins_immediate(vf, rd, reg_index, ReadXRegister(instr->GetRn())); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) { + uint64_t value = LogicVRegister(rn).Uint(vf, reg_index); + value &= MaxUintFromFormat(vf); + WriteXRegister(instr->GetRd(), value); + } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) { + int64_t value = LogicVRegister(rn).Int(vf, reg_index); + if (instr->GetNEONQ()) { + WriteXRegister(instr->GetRd(), value); + } else { + WriteWRegister(instr->GetRd(), (int32_t)value); + } + } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) { + dup_element(vf, rd, rn, reg_index); + } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) { + dup_immediate(vf, rd, ReadXRegister(instr->GetRn())); + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONExtract(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + if (instr->Mask(NEONExtractMask) == NEON_EXT) { + int index = instr->GetImmNEONExt(); + ext(vf, rd, rn, rm, index); + } else { + VIXL_UNIMPLEMENTED(); 
+ } +} + + +void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr, + AddrMode addr_mode) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + uint64_t addr_base = ReadXRegister(instr->GetRn(), Reg31IsStackPointer); + int reg_size = RegisterSizeInBytesFromFormat(vf); + + int reg[4]; + uint64_t addr[4]; + for (int i = 0; i < 4; i++) { + reg[i] = (instr->GetRt() + i) % kNumberOfVRegisters; + addr[i] = addr_base + (i * reg_size); + } + int count = 1; + bool log_read = true; + + // Bit 23 determines whether this is an offset or post-index addressing mode. + // In offset mode, bits 20 to 16 should be zero; these bits encode the + // register or immediate in post-index mode. + if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) { + VIXL_UNREACHABLE(); + } + + // We use the PostIndex mask here, as it works in this case for both Offset + // and PostIndex addressing. + switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) { + case NEON_LD1_4v: + case NEON_LD1_4v_post: + ld1(vf, ReadVRegister(reg[3]), addr[3]); + count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_3v: + case NEON_LD1_3v_post: + ld1(vf, ReadVRegister(reg[2]), addr[2]); + count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_2v: + case NEON_LD1_2v_post: + ld1(vf, ReadVRegister(reg[1]), addr[1]); + count++; + VIXL_FALLTHROUGH(); + case NEON_LD1_1v: + case NEON_LD1_1v_post: + ld1(vf, ReadVRegister(reg[0]), addr[0]); + break; + case NEON_ST1_4v: + case NEON_ST1_4v_post: + st1(vf, ReadVRegister(reg[3]), addr[3]); + count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_3v: + case NEON_ST1_3v_post: + st1(vf, ReadVRegister(reg[2]), addr[2]); + count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_2v: + case NEON_ST1_2v_post: + st1(vf, ReadVRegister(reg[1]), addr[1]); + count++; + VIXL_FALLTHROUGH(); + case NEON_ST1_1v: + case NEON_ST1_1v_post: + st1(vf, ReadVRegister(reg[0]), addr[0]); + log_read = false; + break; + case 
NEON_LD2_post: + case NEON_LD2: + ld2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]); + count = 2; + break; + case NEON_ST2: + case NEON_ST2_post: + st2(vf, ReadVRegister(reg[0]), ReadVRegister(reg[1]), addr[0]); + count = 2; + log_read = false; + break; + case NEON_LD3_post: + case NEON_LD3: + ld3(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + addr[0]); + count = 3; + break; + case NEON_ST3: + case NEON_ST3_post: + st3(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + addr[0]); + count = 3; + log_read = false; + break; + case NEON_ST4: + case NEON_ST4_post: + st4(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + ReadVRegister(reg[3]), + addr[0]); + count = 4; + log_read = false; + break; + case NEON_LD4_post: + case NEON_LD4: + ld4(vf, + ReadVRegister(reg[0]), + ReadVRegister(reg[1]), + ReadVRegister(reg[2]), + ReadVRegister(reg[3]), + addr[0]); + count = 4; + break; + default: + VIXL_UNIMPLEMENTED(); + } + + // Explicitly log the register update whilst we have type information. + for (int i = 0; i < count; i++) { + // For de-interleaving loads, only print the base address. + int lane_size = LaneSizeInBytesFromFormat(vf); + PrintRegisterFormat format = GetPrintRegisterFormatTryFP( + GetPrintRegisterFormatForSize(reg_size, lane_size)); + if (log_read) { + LogVRead(addr_base, reg[i], format); + } else { + LogVWrite(addr_base, reg[i], format); + } + } + + if (addr_mode == PostIndex) { + int rm = instr->GetRm(); + // The immediate post index addressing mode is indicated by rm = 31. + // The immediate is implied by the number of vector registers used. + addr_base += (rm == 31) ? 
RegisterSizeInBytesFromFormat(vf) * count + : ReadXRegister(rm); + WriteXRegister(instr->GetRn(), addr_base); + } else { + VIXL_ASSERT(addr_mode == Offset); + } +} + + +void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) { + NEONLoadStoreMultiStructHelper(instr, Offset); +} + + +void Simulator::VisitNEONLoadStoreMultiStructPostIndex( + const Instruction* instr) { + NEONLoadStoreMultiStructHelper(instr, PostIndex); +} + + +void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr, + AddrMode addr_mode) { + uint64_t addr = ReadXRegister(instr->GetRn(), Reg31IsStackPointer); + int rt = instr->GetRt(); + + // Bit 23 determines whether this is an offset or post-index addressing mode. + // In offset mode, bits 20 to 16 should be zero; these bits encode the + // register or immediate in post-index mode. + if ((instr->ExtractBit(23) == 0) && (instr->ExtractBits(20, 16) != 0)) { + VIXL_UNREACHABLE(); + } + + // We use the PostIndex mask here, as it works in this case for both Offset + // and PostIndex addressing. 
+ bool do_load = false; + + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap()); + VectorFormat vf_t = nfd.GetVectorFormat(); + + VectorFormat vf = kFormat16B; + switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) { + case NEON_LD1_b: + case NEON_LD1_b_post: + case NEON_LD2_b: + case NEON_LD2_b_post: + case NEON_LD3_b: + case NEON_LD3_b_post: + case NEON_LD4_b: + case NEON_LD4_b_post: + do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_b: + case NEON_ST1_b_post: + case NEON_ST2_b: + case NEON_ST2_b_post: + case NEON_ST3_b: + case NEON_ST3_b_post: + case NEON_ST4_b: + case NEON_ST4_b_post: + break; + + case NEON_LD1_h: + case NEON_LD1_h_post: + case NEON_LD2_h: + case NEON_LD2_h_post: + case NEON_LD3_h: + case NEON_LD3_h_post: + case NEON_LD4_h: + case NEON_LD4_h_post: + do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_h: + case NEON_ST1_h_post: + case NEON_ST2_h: + case NEON_ST2_h_post: + case NEON_ST3_h: + case NEON_ST3_h_post: + case NEON_ST4_h: + case NEON_ST4_h_post: + vf = kFormat8H; + break; + case NEON_LD1_s: + case NEON_LD1_s_post: + case NEON_LD2_s: + case NEON_LD2_s_post: + case NEON_LD3_s: + case NEON_LD3_s_post: + case NEON_LD4_s: + case NEON_LD4_s_post: + do_load = true; + VIXL_FALLTHROUGH(); + case NEON_ST1_s: + case NEON_ST1_s_post: + case NEON_ST2_s: + case NEON_ST2_s_post: + case NEON_ST3_s: + case NEON_ST3_s_post: + case NEON_ST4_s: + case NEON_ST4_s_post: { + VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d); + VIXL_STATIC_ASSERT((NEON_LD1_s_post | (1 << NEONLSSize_offset)) == + NEON_LD1_d_post); + VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d); + VIXL_STATIC_ASSERT((NEON_ST1_s_post | (1 << NEONLSSize_offset)) == + NEON_ST1_d_post); + vf = ((instr->GetNEONLSSize() & 1) == 0) ? 
kFormat4S : kFormat2D; + break; + } + + case NEON_LD1R: + case NEON_LD1R_post: { + vf = vf_t; + ld1r(vf, ReadVRegister(rt), addr); + do_load = true; + break; + } + + case NEON_LD2R: + case NEON_LD2R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + ld2r(vf, ReadVRegister(rt), ReadVRegister(rt2), addr); + do_load = true; + break; + } + + case NEON_LD3R: + case NEON_LD3R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + ld3r(vf, ReadVRegister(rt), ReadVRegister(rt2), ReadVRegister(rt3), addr); + do_load = true; + break; + } + + case NEON_LD4R: + case NEON_LD4R_post: { + vf = vf_t; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + int rt4 = (rt3 + 1) % kNumberOfVRegisters; + ld4r(vf, + ReadVRegister(rt), + ReadVRegister(rt2), + ReadVRegister(rt3), + ReadVRegister(rt4), + addr); + do_load = true; + break; + } + default: + VIXL_UNIMPLEMENTED(); + } + + PrintRegisterFormat print_format = + GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf)); + // Make sure that the print_format only includes a single lane. 
+ print_format = + static_cast(print_format & ~kPrintRegAsVectorMask); + + int esize = LaneSizeInBytesFromFormat(vf); + int index_shift = LaneSizeInBytesLog2FromFormat(vf); + int lane = instr->GetNEONLSIndex(index_shift); + int scale = 0; + int rt2 = (rt + 1) % kNumberOfVRegisters; + int rt3 = (rt2 + 1) % kNumberOfVRegisters; + int rt4 = (rt3 + 1) % kNumberOfVRegisters; + switch (instr->Mask(NEONLoadStoreSingleLenMask)) { + case NEONLoadStoreSingle1: + scale = 1; + if (do_load) { + ld1(vf, ReadVRegister(rt), lane, addr); + LogVRead(addr, rt, print_format, lane); + } else { + st1(vf, ReadVRegister(rt), lane, addr); + LogVWrite(addr, rt, print_format, lane); + } + break; + case NEONLoadStoreSingle2: + scale = 2; + if (do_load) { + ld2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + } else { + st2(vf, ReadVRegister(rt), ReadVRegister(rt2), lane, addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + } + break; + case NEONLoadStoreSingle3: + scale = 3; + if (do_load) { + ld3(vf, + ReadVRegister(rt), + ReadVRegister(rt2), + ReadVRegister(rt3), + lane, + addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + LogVRead(addr + (2 * esize), rt3, print_format, lane); + } else { + st3(vf, + ReadVRegister(rt), + ReadVRegister(rt2), + ReadVRegister(rt3), + lane, + addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + LogVWrite(addr + (2 * esize), rt3, print_format, lane); + } + break; + case NEONLoadStoreSingle4: + scale = 4; + if (do_load) { + ld4(vf, + ReadVRegister(rt), + ReadVRegister(rt2), + ReadVRegister(rt3), + ReadVRegister(rt4), + lane, + addr); + LogVRead(addr, rt, print_format, lane); + LogVRead(addr + esize, rt2, print_format, lane); + LogVRead(addr + (2 * esize), rt3, print_format, lane); + LogVRead(addr + (3 * esize), 
rt4, print_format, lane); + } else { + st4(vf, + ReadVRegister(rt), + ReadVRegister(rt2), + ReadVRegister(rt3), + ReadVRegister(rt4), + lane, + addr); + LogVWrite(addr, rt, print_format, lane); + LogVWrite(addr + esize, rt2, print_format, lane); + LogVWrite(addr + (2 * esize), rt3, print_format, lane); + LogVWrite(addr + (3 * esize), rt4, print_format, lane); + } + break; + default: + VIXL_UNIMPLEMENTED(); + } + + if (addr_mode == PostIndex) { + int rm = instr->GetRm(); + int lane_size = LaneSizeInBytesFromFormat(vf); + WriteXRegister(instr->GetRn(), + addr + + ((rm == 31) ? (scale * lane_size) : ReadXRegister(rm))); + } +} + + +void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, Offset); +} + + +void Simulator::VisitNEONLoadStoreSingleStructPostIndex( + const Instruction* instr) { + NEONLoadStoreSingleStructHelper(instr, PostIndex); +} + + +void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) { + SimVRegister& rd = ReadVRegister(instr->GetRd()); + int cmode = instr->GetNEONCmode(); + int cmode_3_1 = (cmode >> 1) & 7; + int cmode_3 = (cmode >> 3) & 1; + int cmode_2 = (cmode >> 2) & 1; + int cmode_1 = (cmode >> 1) & 1; + int cmode_0 = cmode & 1; + int half_enc = instr->ExtractBit(11); + int q = instr->GetNEONQ(); + int op_bit = instr->GetNEONModImmOp(); + uint64_t imm8 = instr->GetImmNEONabcdefgh(); + // Find the format and immediate value + uint64_t imm = 0; + VectorFormat vform = kFormatUndefined; + switch (cmode_3_1) { + case 0x0: + case 0x1: + case 0x2: + case 0x3: + vform = (q == 1) ? kFormat4S : kFormat2S; + imm = imm8 << (8 * cmode_3_1); + break; + case 0x4: + case 0x5: + vform = (q == 1) ? kFormat8H : kFormat4H; + imm = imm8 << (8 * cmode_1); + break; + case 0x6: + vform = (q == 1) ? kFormat4S : kFormat2S; + if (cmode_0 == 0) { + imm = imm8 << 8 | 0x000000ff; + } else { + imm = imm8 << 16 | 0x0000ffff; + } + break; + case 0x7: + if (cmode_0 == 0 && op_bit == 0) { + vform = q ? 
kFormat16B : kFormat8B; + imm = imm8; + } else if (cmode_0 == 0 && op_bit == 1) { + vform = q ? kFormat2D : kFormat1D; + imm = 0; + for (int i = 0; i < 8; ++i) { + if (imm8 & (1 << i)) { + imm |= (UINT64_C(0xff) << (8 * i)); + } + } + } else { // cmode_0 == 1, cmode == 0xf. + if (half_enc == 1) { + vform = q ? kFormat8H : kFormat4H; + imm = Float16ToRawbits(instr->GetImmNEONFP16()); + } else if (op_bit == 0) { + vform = q ? kFormat4S : kFormat2S; + imm = FloatToRawbits(instr->GetImmNEONFP32()); + } else if (q == 1) { + vform = kFormat2D; + imm = DoubleToRawbits(instr->GetImmNEONFP64()); + } else { + VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf)); + VisitUnallocated(instr); + } + } + break; + default: + VIXL_UNREACHABLE(); + break; + } + + // Find the operation + NEONModifiedImmediateOp op; + if (cmode_3 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<3> == '1' + if (cmode_2 == 0) { + if (cmode_0 == 0) { + op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR; + } + } else { // cmode<2> == '1' + if (cmode_1 == 0) { + op = op_bit ? 
NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI; + } else { // cmode<1> == '1' + if (cmode_0 == 0) { + op = NEONModifiedImmediate_MOVI; + } else { // cmode<0> == '1' + op = NEONModifiedImmediate_MOVI; + } + } + } + } + + // Call the logic function + if (op == NEONModifiedImmediate_ORR) { + orr(vform, rd, rd, imm); + } else if (op == NEONModifiedImmediate_BIC) { + bic(vform, rd, rd, imm); + } else if (op == NEONModifiedImmediate_MOVI) { + movi(vform, rd, imm); + } else if (op == NEONModifiedImmediate_MVNI) { + mvni(vform, rd, imm); + } else { + VisitUnimplemented(instr); + } +} + + +void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) { + // These instructions all use a two bit size field, except NOT and RBIT, + // which use the field to encode the operation. 
+ switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_CMEQ_zero_scalar: + cmp(vf, rd, rn, 0, eq); + break; + case NEON_CMGE_zero_scalar: + cmp(vf, rd, rn, 0, ge); + break; + case NEON_CMGT_zero_scalar: + cmp(vf, rd, rn, 0, gt); + break; + case NEON_CMLT_zero_scalar: + cmp(vf, rd, rn, 0, lt); + break; + case NEON_CMLE_zero_scalar: + cmp(vf, rd, rn, 0, le); + break; + case NEON_ABS_scalar: + abs(vf, rd, rn); + break; + case NEON_SQABS_scalar: + abs(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_NEG_scalar: + neg(vf, rd, rn); + break; + case NEON_SQNEG_scalar: + neg(vf, rd, rn).SignedSaturate(vf); + break; + case NEON_SUQADD_scalar: + suqadd(vf, rd, rn); + break; + case NEON_USQADD_scalar: + usqadd(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + break; + } + } else { + VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + FPRounding fpcr_rounding = static_cast(ReadFpcr().GetRMode()); + + // These instructions all use a one bit size field, except SQXTUN, SQXTN + // and UQXTN, which use a two bit size field. 
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) { + case NEON_FRECPE_scalar: + frecpe(fpf, rd, rn, fpcr_rounding); + break; + case NEON_FRECPX_scalar: + frecpx(fpf, rd, rn); + break; + case NEON_FRSQRTE_scalar: + frsqrte(fpf, rd, rn); + break; + case NEON_FCMGT_zero_scalar: + fcmp_zero(fpf, rd, rn, gt); + break; + case NEON_FCMGE_zero_scalar: + fcmp_zero(fpf, rd, rn, ge); + break; + case NEON_FCMEQ_zero_scalar: + fcmp_zero(fpf, rd, rn, eq); + break; + case NEON_FCMLE_zero_scalar: + fcmp_zero(fpf, rd, rn, le); + break; + case NEON_FCMLT_zero_scalar: + fcmp_zero(fpf, rd, rn, lt); + break; + case NEON_SCVTF_scalar: + scvtf(fpf, rd, rn, 0, fpcr_rounding); + break; + case NEON_UCVTF_scalar: + ucvtf(fpf, rd, rn, 0, fpcr_rounding); + break; + case NEON_FCVTNS_scalar: + fcvts(fpf, rd, rn, FPTieEven); + break; + case NEON_FCVTNU_scalar: + fcvtu(fpf, rd, rn, FPTieEven); + break; + case NEON_FCVTPS_scalar: + fcvts(fpf, rd, rn, FPPositiveInfinity); + break; + case NEON_FCVTPU_scalar: + fcvtu(fpf, rd, rn, FPPositiveInfinity); + break; + case NEON_FCVTMS_scalar: + fcvts(fpf, rd, rn, FPNegativeInfinity); + break; + case NEON_FCVTMU_scalar: + fcvtu(fpf, rd, rn, FPNegativeInfinity); + break; + case NEON_FCVTZS_scalar: + fcvts(fpf, rd, rn, FPZero); + break; + case NEON_FCVTZU_scalar: + fcvtu(fpf, rd, rn, FPZero); + break; + case NEON_FCVTAS_scalar: + fcvts(fpf, rd, rn, FPTieAway); + break; + case NEON_FCVTAU_scalar: + fcvtu(fpf, rd, rn, FPTieAway); + break; + case NEON_FCVTXN_scalar: + // Unlike all of the other FP instructions above, fcvtxn encodes dest + // size S as size<0>=1. There's only one case, so we ignore the form. 
+ VIXL_ASSERT(instr->ExtractBit(22) == 1); + fcvtxn(kFormatS, rd, rn); + break; + default: + switch (instr->Mask(NEONScalar2RegMiscMask)) { + case NEON_SQXTN_scalar: + sqxtn(vf, rd, rn); + break; + case NEON_UQXTN_scalar: + uqxtn(vf, rd, rn); + break; + case NEON_SQXTUN_scalar: + sqxtun(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } + } +} + + +void Simulator::VisitNEONScalar2RegMiscFP16(const Instruction* instr) { + VectorFormat fpf = kFormatH; + FPRounding fpcr_rounding = static_cast(ReadFpcr().GetRMode()); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + switch (instr->Mask(NEONScalar2RegMiscFP16Mask)) { + case NEON_FRECPE_H_scalar: + frecpe(fpf, rd, rn, fpcr_rounding); + break; + case NEON_FRECPX_H_scalar: + frecpx(fpf, rd, rn); + break; + case NEON_FRSQRTE_H_scalar: + frsqrte(fpf, rd, rn); + break; + case NEON_FCMGT_H_zero_scalar: + fcmp_zero(fpf, rd, rn, gt); + break; + case NEON_FCMGE_H_zero_scalar: + fcmp_zero(fpf, rd, rn, ge); + break; + case NEON_FCMEQ_H_zero_scalar: + fcmp_zero(fpf, rd, rn, eq); + break; + case NEON_FCMLE_H_zero_scalar: + fcmp_zero(fpf, rd, rn, le); + break; + case NEON_FCMLT_H_zero_scalar: + fcmp_zero(fpf, rd, rn, lt); + break; + case NEON_SCVTF_H_scalar: + scvtf(fpf, rd, rn, 0, fpcr_rounding); + break; + case NEON_UCVTF_H_scalar: + ucvtf(fpf, rd, rn, 0, fpcr_rounding); + break; + case NEON_FCVTNS_H_scalar: + fcvts(fpf, rd, rn, FPTieEven); + break; + case NEON_FCVTNU_H_scalar: + fcvtu(fpf, rd, rn, FPTieEven); + break; + case NEON_FCVTPS_H_scalar: + fcvts(fpf, rd, rn, FPPositiveInfinity); + break; + case NEON_FCVTPU_H_scalar: + fcvtu(fpf, rd, rn, FPPositiveInfinity); + break; + case NEON_FCVTMS_H_scalar: + fcvts(fpf, rd, rn, FPNegativeInfinity); + break; + case NEON_FCVTMU_H_scalar: + fcvtu(fpf, rd, rn, FPNegativeInfinity); + break; + case NEON_FCVTZS_H_scalar: + fcvts(fpf, rd, rn, FPZero); + break; + case NEON_FCVTZU_H_scalar: + fcvtu(fpf, rd, rn, FPZero); + 
break; + case NEON_FCVTAS_H_scalar: + fcvts(fpf, rd, rn, FPTieAway); + break; + case NEON_FCVTAU_H_scalar: + fcvtu(fpf, rd, rn, FPTieAway); + break; + } +} + + +void Simulator::VisitNEONScalar3Diff(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + switch (instr->Mask(NEONScalar3DiffMask)) { + case NEON_SQDMLAL_scalar: + sqdmlal(vf, rd, rn, rm); + break; + case NEON_SQDMLSL_scalar: + sqdmlsl(vf, rd, rn, rm); + break; + case NEON_SQDMULL_scalar: + sqdmull(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONScalar3Same(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) { + vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + switch (instr->Mask(NEONScalar3SameFPMask)) { + case NEON_FMULX_scalar: + fmulx(vf, rd, rn, rm); + break; + case NEON_FACGE_scalar: + fabscmp(vf, rd, rn, rm, ge); + break; + case NEON_FACGT_scalar: + fabscmp(vf, rd, rn, rm, gt); + break; + case NEON_FCMEQ_scalar: + fcmp(vf, rd, rn, rm, eq); + break; + case NEON_FCMGE_scalar: + fcmp(vf, rd, rn, rm, ge); + break; + case NEON_FCMGT_scalar: + fcmp(vf, rd, rn, rm, gt); + break; + case NEON_FRECPS_scalar: + frecps(vf, rd, rn, rm); + break; + case NEON_FRSQRTS_scalar: + frsqrts(vf, rd, rn, rm); + break; + case NEON_FABD_scalar: + fabd(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } else { + switch (instr->Mask(NEONScalar3SameMask)) { + case NEON_ADD_scalar: + add(vf, rd, rn, 
rm); + break; + case NEON_SUB_scalar: + sub(vf, rd, rn, rm); + break; + case NEON_CMEQ_scalar: + cmp(vf, rd, rn, rm, eq); + break; + case NEON_CMGE_scalar: + cmp(vf, rd, rn, rm, ge); + break; + case NEON_CMGT_scalar: + cmp(vf, rd, rn, rm, gt); + break; + case NEON_CMHI_scalar: + cmp(vf, rd, rn, rm, hi); + break; + case NEON_CMHS_scalar: + cmp(vf, rd, rn, rm, hs); + break; + case NEON_CMTST_scalar: + cmptst(vf, rd, rn, rm); + break; + case NEON_USHL_scalar: + ushl(vf, rd, rn, rm); + break; + case NEON_SSHL_scalar: + sshl(vf, rd, rn, rm); + break; + case NEON_SQDMULH_scalar: + sqdmulh(vf, rd, rn, rm); + break; + case NEON_SQRDMULH_scalar: + sqrdmulh(vf, rd, rn, rm); + break; + case NEON_UQADD_scalar: + add(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQADD_scalar: + add(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSUB_scalar: + sub(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSUB_scalar: + sub(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_UQSHL_scalar: + ushl(vf, rd, rn, rm).UnsignedSaturate(vf); + break; + case NEON_SQSHL_scalar: + sshl(vf, rd, rn, rm).SignedSaturate(vf); + break; + case NEON_URSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf); + break; + case NEON_SRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf); + break; + case NEON_UQRSHL_scalar: + ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf); + break; + case NEON_SQRSHL_scalar: + sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf); + break; + default: + VIXL_UNIMPLEMENTED(); + } + } +} + +void Simulator::VisitNEONScalar3SameFP16(const Instruction* instr) { + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(NEONScalar3SameFP16Mask)) { + case NEON_FABD_H_scalar: + fabd(kFormatH, rd, rn, rm); + break; + case NEON_FMULX_H_scalar: + fmulx(kFormatH, rd, rn, rm); + break; + case NEON_FCMEQ_H_scalar: + fcmp(kFormatH, rd, rn, rm, eq); + 
break; + case NEON_FCMGE_H_scalar: + fcmp(kFormatH, rd, rn, rm, ge); + break; + case NEON_FCMGT_H_scalar: + fcmp(kFormatH, rd, rn, rm, gt); + break; + case NEON_FACGE_H_scalar: + fabscmp(kFormatH, rd, rn, rm, ge); + break; + case NEON_FACGT_H_scalar: + fabscmp(kFormatH, rd, rn, rm, gt); + break; + case NEON_FRECPS_H_scalar: + frecps(kFormatH, rd, rn, rm); + break; + case NEON_FRSQRTS_H_scalar: + frsqrts(kFormatH, rd, rn, rm); + break; + default: + VIXL_UNREACHABLE(); + } +} + + +void Simulator::VisitNEONScalar3SameExtra(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(NEONScalar3SameExtraMask)) { + case NEON_SQRDMLAH_scalar: + sqrdmlah(vf, rd, rn, rm); + break; + case NEON_SQRDMLSH_scalar: + sqrdmlsh(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + +void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap()); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + ByElementOp Op = NULL; + + int rm_reg = instr->GetRm(); + int index = (instr->GetNEONH() << 1) | instr->GetNEONL(); + if (instr->GetNEONSize() == 1) { + rm_reg &= 0xf; + index = (index << 1) | instr->GetNEONM(); + } + + switch (instr->Mask(NEONScalarByIndexedElementMask)) { + case NEON_SQDMULL_byelement_scalar: + Op = &Simulator::sqdmull; + break; + case NEON_SQDMLAL_byelement_scalar: + Op = &Simulator::sqdmlal; + break; + case NEON_SQDMLSL_byelement_scalar: + Op = &Simulator::sqdmlsl; + break; + case NEON_SQDMULH_byelement_scalar: + Op = &Simulator::sqdmulh; + vf = 
vf_r; + break; + case NEON_SQRDMULH_byelement_scalar: + Op = &Simulator::sqrdmulh; + vf = vf_r; + break; + case NEON_SQRDMLAH_byelement_scalar: + Op = &Simulator::sqrdmlah; + vf = vf_r; + break; + case NEON_SQRDMLSH_byelement_scalar: + Op = &Simulator::sqrdmlsh; + vf = vf_r; + break; + default: + vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap()); + index = instr->GetNEONH(); + if (instr->GetFPType() == 0) { + index = (index << 2) | (instr->GetNEONL() << 1) | instr->GetNEONM(); + rm_reg &= 0xf; + vf = kFormatH; + } else if ((instr->GetFPType() & 1) == 0) { + index = (index << 1) | instr->GetNEONL(); + } + switch (instr->Mask(NEONScalarByIndexedElementFPMask)) { + case NEON_FMUL_H_byelement_scalar: + case NEON_FMUL_byelement_scalar: + Op = &Simulator::fmul; + break; + case NEON_FMLA_H_byelement_scalar: + case NEON_FMLA_byelement_scalar: + Op = &Simulator::fmla; + break; + case NEON_FMLS_H_byelement_scalar: + case NEON_FMLS_byelement_scalar: + Op = &Simulator::fmls; + break; + case NEON_FMULX_H_byelement_scalar: + case NEON_FMULX_byelement_scalar: + Op = &Simulator::fmulx; + break; + default: + VIXL_UNIMPLEMENTED(); + } + } + + (this->*Op)(vf, rd, rn, ReadVRegister(rm_reg), index); +} + + +void Simulator::VisitNEONScalarCopy(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + + if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) { + int imm5 = instr->GetImmNEON5(); + int tz = CountTrailingZeros(imm5, 32); + int rn_index = imm5 >> (tz + 1); + dup_element(vf, rd, rn, rn_index); + } else { + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONScalarPairwise(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarPairwiseFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = 
ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + switch (instr->Mask(NEONScalarPairwiseMask)) { + case NEON_ADDP_scalar: { + // All pairwise operations except ADDP use bit U to differentiate FP16 + // from FP32/FP64 variations. + NEONFormatDecoder nfd_addp(instr, NEONFormatDecoder::FPScalarFormatMap()); + addp(nfd_addp.GetVectorFormat(), rd, rn); + break; + } + case NEON_FADDP_h_scalar: + case NEON_FADDP_scalar: + faddp(vf, rd, rn); + break; + case NEON_FMAXP_h_scalar: + case NEON_FMAXP_scalar: + fmaxp(vf, rd, rn); + break; + case NEON_FMAXNMP_h_scalar: + case NEON_FMAXNMP_scalar: + fmaxnmp(vf, rd, rn); + break; + case NEON_FMINP_h_scalar: + case NEON_FMINP_scalar: + fminp(vf, rd, rn); + break; + case NEON_FMINNMP_h_scalar: + case NEON_FMINNMP_scalar: + fminnmp(vf, rd, rn); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) { + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + FPRounding fpcr_rounding = static_cast(ReadFpcr().GetRMode()); + + static const NEONFormatMap map = {{22, 21, 20, 19}, + {NF_UNDEF, + NF_B, + NF_H, + NF_H, + NF_S, + NF_S, + NF_S, + NF_S, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D, + NF_D}}; + NEONFormatDecoder nfd(instr, &map); + VectorFormat vf = nfd.GetVectorFormat(); + + int highestSetBit = HighestSetBitPosition(instr->GetImmNEONImmh()); + int immhimmb = instr->GetImmNEONImmhImmb(); + int right_shift = (16 << highestSetBit) - immhimmb; + int left_shift = immhimmb - (8 << highestSetBit); + switch (instr->Mask(NEONScalarShiftImmediateMask)) { + case NEON_SHL_scalar: + shl(vf, rd, rn, left_shift); + break; + case NEON_SLI_scalar: + sli(vf, rd, rn, left_shift); + break; + case NEON_SQSHL_imm_scalar: + sqshl(vf, rd, rn, left_shift); + break; + case NEON_UQSHL_imm_scalar: + uqshl(vf, rd, rn, left_shift); + break; + case NEON_SQSHLU_scalar: + sqshlu(vf, rd, rn, 
left_shift); + break; + case NEON_SRI_scalar: + sri(vf, rd, rn, right_shift); + break; + case NEON_SSHR_scalar: + sshr(vf, rd, rn, right_shift); + break; + case NEON_USHR_scalar: + ushr(vf, rd, rn, right_shift); + break; + case NEON_SRSHR_scalar: + sshr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_URSHR_scalar: + ushr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_SSRA_scalar: + ssra(vf, rd, rn, right_shift); + break; + case NEON_USRA_scalar: + usra(vf, rd, rn, right_shift); + break; + case NEON_SRSRA_scalar: + srsra(vf, rd, rn, right_shift); + break; + case NEON_URSRA_scalar: + ursra(vf, rd, rn, right_shift); + break; + case NEON_UQSHRN_scalar: + uqshrn(vf, rd, rn, right_shift); + break; + case NEON_UQRSHRN_scalar: + uqrshrn(vf, rd, rn, right_shift); + break; + case NEON_SQSHRN_scalar: + sqshrn(vf, rd, rn, right_shift); + break; + case NEON_SQRSHRN_scalar: + sqrshrn(vf, rd, rn, right_shift); + break; + case NEON_SQSHRUN_scalar: + sqshrun(vf, rd, rn, right_shift); + break; + case NEON_SQRSHRUN_scalar: + sqrshrun(vf, rd, rn, right_shift); + break; + case NEON_FCVTZS_imm_scalar: + fcvts(vf, rd, rn, FPZero, right_shift); + break; + case NEON_FCVTZU_imm_scalar: + fcvtu(vf, rd, rn, FPZero, right_shift); + break; + case NEON_SCVTF_imm_scalar: + scvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + case NEON_UCVTF_imm_scalar: + ucvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONShiftImmediate(const Instruction* instr) { + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + FPRounding fpcr_rounding = static_cast(ReadFpcr().GetRMode()); + + // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H, + // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined. 
+ static const NEONFormatMap map = {{22, 21, 20, 19, 30}, + {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, + NF_4H, NF_8H, NF_4H, NF_8H, + NF_2S, NF_4S, NF_2S, NF_4S, + NF_2S, NF_4S, NF_2S, NF_4S, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, + NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}}; + NEONFormatDecoder nfd(instr, &map); + VectorFormat vf = nfd.GetVectorFormat(); + + // 0001->8H, 001x->4S, 01xx->2D, all others undefined. + static const NEONFormatMap map_l = + {{22, 21, 20, 19}, + {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}}; + VectorFormat vf_l = nfd.GetVectorFormat(&map_l); + + int highestSetBit = HighestSetBitPosition(instr->GetImmNEONImmh()); + int immhimmb = instr->GetImmNEONImmhImmb(); + int right_shift = (16 << highestSetBit) - immhimmb; + int left_shift = immhimmb - (8 << highestSetBit); + + switch (instr->Mask(NEONShiftImmediateMask)) { + case NEON_SHL: + shl(vf, rd, rn, left_shift); + break; + case NEON_SLI: + sli(vf, rd, rn, left_shift); + break; + case NEON_SQSHLU: + sqshlu(vf, rd, rn, left_shift); + break; + case NEON_SRI: + sri(vf, rd, rn, right_shift); + break; + case NEON_SSHR: + sshr(vf, rd, rn, right_shift); + break; + case NEON_USHR: + ushr(vf, rd, rn, right_shift); + break; + case NEON_SRSHR: + sshr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_URSHR: + ushr(vf, rd, rn, right_shift).Round(vf); + break; + case NEON_SSRA: + ssra(vf, rd, rn, right_shift); + break; + case NEON_USRA: + usra(vf, rd, rn, right_shift); + break; + case NEON_SRSRA: + srsra(vf, rd, rn, right_shift); + break; + case NEON_URSRA: + ursra(vf, rd, rn, right_shift); + break; + case NEON_SQSHL_imm: + sqshl(vf, rd, rn, left_shift); + break; + case NEON_UQSHL_imm: + uqshl(vf, rd, rn, left_shift); + break; + case NEON_SCVTF_imm: + scvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + case NEON_UCVTF_imm: + ucvtf(vf, rd, rn, right_shift, fpcr_rounding); + break; + case NEON_FCVTZS_imm: + fcvts(vf, rd, rn, 
FPZero, right_shift); + break; + case NEON_FCVTZU_imm: + fcvtu(vf, rd, rn, FPZero, right_shift); + break; + case NEON_SSHLL: + vf = vf_l; + if (instr->Mask(NEON_Q)) { + sshll2(vf, rd, rn, left_shift); + } else { + sshll(vf, rd, rn, left_shift); + } + break; + case NEON_USHLL: + vf = vf_l; + if (instr->Mask(NEON_Q)) { + ushll2(vf, rd, rn, left_shift); + } else { + ushll(vf, rd, rn, left_shift); + } + break; + case NEON_SHRN: + if (instr->Mask(NEON_Q)) { + shrn2(vf, rd, rn, right_shift); + } else { + shrn(vf, rd, rn, right_shift); + } + break; + case NEON_RSHRN: + if (instr->Mask(NEON_Q)) { + rshrn2(vf, rd, rn, right_shift); + } else { + rshrn(vf, rd, rn, right_shift); + } + break; + case NEON_UQSHRN: + if (instr->Mask(NEON_Q)) { + uqshrn2(vf, rd, rn, right_shift); + } else { + uqshrn(vf, rd, rn, right_shift); + } + break; + case NEON_UQRSHRN: + if (instr->Mask(NEON_Q)) { + uqrshrn2(vf, rd, rn, right_shift); + } else { + uqrshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQSHRN: + if (instr->Mask(NEON_Q)) { + sqshrn2(vf, rd, rn, right_shift); + } else { + sqshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQRSHRN: + if (instr->Mask(NEON_Q)) { + sqrshrn2(vf, rd, rn, right_shift); + } else { + sqrshrn(vf, rd, rn, right_shift); + } + break; + case NEON_SQSHRUN: + if (instr->Mask(NEON_Q)) { + sqshrun2(vf, rd, rn, right_shift); + } else { + sqshrun(vf, rd, rn, right_shift); + } + break; + case NEON_SQRSHRUN: + if (instr->Mask(NEON_Q)) { + sqrshrun2(vf, rd, rn, right_shift); + } else { + sqrshrun(vf, rd, rn, right_shift); + } + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONTable(const Instruction* instr) { + NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap()); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rn2 = ReadVRegister((instr->GetRn() + 1) % kNumberOfVRegisters); + SimVRegister& rn3 = 
ReadVRegister((instr->GetRn() + 2) % kNumberOfVRegisters); + SimVRegister& rn4 = ReadVRegister((instr->GetRn() + 3) % kNumberOfVRegisters); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(NEONTableMask)) { + case NEON_TBL_1v: + tbl(vf, rd, rn, rm); + break; + case NEON_TBL_2v: + tbl(vf, rd, rn, rn2, rm); + break; + case NEON_TBL_3v: + tbl(vf, rd, rn, rn2, rn3, rm); + break; + case NEON_TBL_4v: + tbl(vf, rd, rn, rn2, rn3, rn4, rm); + break; + case NEON_TBX_1v: + tbx(vf, rd, rn, rm); + break; + case NEON_TBX_2v: + tbx(vf, rd, rn, rn2, rm); + break; + case NEON_TBX_3v: + tbx(vf, rd, rn, rn2, rn3, rm); + break; + case NEON_TBX_4v: + tbx(vf, rd, rn, rn2, rn3, rn4, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::VisitNEONPerm(const Instruction* instr) { + NEONFormatDecoder nfd(instr); + VectorFormat vf = nfd.GetVectorFormat(); + + SimVRegister& rd = ReadVRegister(instr->GetRd()); + SimVRegister& rn = ReadVRegister(instr->GetRn()); + SimVRegister& rm = ReadVRegister(instr->GetRm()); + + switch (instr->Mask(NEONPermMask)) { + case NEON_TRN1: + trn1(vf, rd, rn, rm); + break; + case NEON_TRN2: + trn2(vf, rd, rn, rm); + break; + case NEON_UZP1: + uzp1(vf, rd, rn, rm); + break; + case NEON_UZP2: + uzp2(vf, rd, rn, rm); + break; + case NEON_ZIP1: + zip1(vf, rd, rn, rm); + break; + case NEON_ZIP2: + zip2(vf, rd, rn, rm); + break; + default: + VIXL_UNIMPLEMENTED(); + } +} + + +void Simulator::DoUnreachable(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->GetImmException() == kUnreachableOpcode)); + + fprintf(stream_, + "Hit UNREACHABLE marker at pc=%p.\n", + reinterpret_cast(instr)); + abort(); +} + + +void Simulator::DoTrace(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->GetImmException() == kTraceOpcode)); + + // Read the arguments encoded inline in the instruction stream. 
+  uint32_t parameters;
+  uint32_t command;
+
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+  memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+  memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
+
+  switch (command) {
+    case TRACE_ENABLE:
+      SetTraceParameters(GetTraceParameters() | parameters);
+      break;
+    case TRACE_DISABLE:
+      SetTraceParameters(GetTraceParameters() & ~parameters);
+      break;
+    default:
+      VIXL_UNREACHABLE();
+  }
+
+  WritePc(instr->GetInstructionAtOffset(kTraceLength));
+}
+
+
+void Simulator::DoLog(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kLogOpcode));
+
+  // Read the arguments encoded inline in the instruction stream.
+  uint32_t parameters;
+
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+  memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+
+  // We don't support a one-shot LOG_DISASM.
+  VIXL_ASSERT((parameters & LOG_DISASM) == 0);
+  // Print the requested information.
+  if (parameters & LOG_SYSREGS) PrintSystemRegisters();
+  if (parameters & LOG_REGS) PrintRegisters();
+  if (parameters & LOG_VREGS) PrintVRegisters();
+
+  WritePc(instr->GetInstructionAtOffset(kLogLength));
+}
+
+
+void Simulator::DoPrintf(const Instruction* instr) {
+  VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+              (instr->GetImmException() == kPrintfOpcode));
+
+  // Read the arguments encoded inline in the instruction stream.
+  uint32_t arg_count;
+  uint32_t arg_pattern_list;
+  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+  memcpy(&arg_count, instr + kPrintfArgCountOffset, sizeof(arg_count));
+  memcpy(&arg_pattern_list,
+         instr + kPrintfArgPatternListOffset,
+         sizeof(arg_pattern_list));
+
+  VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
+  VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+
+  // We need to call the host printf function with a set of arguments defined by
+  // arg_pattern_list.
Because we don't know the types and sizes of the
+  // arguments, this is very difficult to do in a robust and portable way. To
+  // work around the problem, we pick apart the format string, and print one
+  // format placeholder at a time.
+
+  // Allocate space for the format string. We take a copy, so we can modify it.
+  // Leave enough space for one extra character per expected argument (plus the
+  // '\0' termination).
+  const char* format_base = ReadRegister<const char*>(0);
+  VIXL_ASSERT(format_base != NULL);
+  size_t length = strlen(format_base) + 1;
+  char* const format = new char[length + arg_count];
+
+  // A list of chunks, each with exactly one format placeholder.
+  const char* chunks[kPrintfMaxArgCount];
+
+  // Copy the format string and search for format placeholders.
+  uint32_t placeholder_count = 0;
+  char* format_scratch = format;
+  for (size_t i = 0; i < length; i++) {
+    if (format_base[i] != '%') {
+      *format_scratch++ = format_base[i];
+    } else {
+      if (format_base[i + 1] == '%') {
+        // Ignore explicit "%%" sequences.
+        *format_scratch++ = format_base[i];
+        i++;
+        // Chunks after the first are passed as format strings to printf, so we
+        // need to escape '%' characters in those chunks.
+        if (placeholder_count > 0) *format_scratch++ = format_base[i];
+      } else {
+        VIXL_CHECK(placeholder_count < arg_count);
+        // Insert '\0' before placeholders, and store their locations.
+        *format_scratch++ = '\0';
+        chunks[placeholder_count++] = format_scratch;
+        *format_scratch++ = format_base[i];
+      }
+    }
+  }
+  VIXL_CHECK(placeholder_count == arg_count);
+
+  // Finally, call printf with each chunk, passing the appropriate register
+  // argument. Normally, printf returns the number of bytes transmitted, so we
+  // can emulate a single printf call by adding the result from each chunk. If
+  // any call returns a negative (error) value, though, just return that value.
+ + printf("%s", clr_printf); + + // Because '\0' is inserted before each placeholder, the first string in + // 'format' contains no format placeholders and should be printed literally. + int result = printf("%s", format); + int pcs_r = 1; // Start at x1. x0 holds the format string. + int pcs_f = 0; // Start at d0. + if (result >= 0) { + for (uint32_t i = 0; i < placeholder_count; i++) { + int part_result = -1; + + uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits); + arg_pattern &= (1 << kPrintfArgPatternBits) - 1; + switch (arg_pattern) { + case kPrintfArgW: + part_result = printf(chunks[i], ReadWRegister(pcs_r++)); + break; + case kPrintfArgX: + part_result = printf(chunks[i], ReadXRegister(pcs_r++)); + break; + case kPrintfArgD: + part_result = printf(chunks[i], ReadDRegister(pcs_f++)); + break; + default: + VIXL_UNREACHABLE(); + } + + if (part_result < 0) { + // Handle error values. + result = part_result; + break; + } + + result += part_result; + } + } + + printf("%s", clr_normal); + + // Printf returns its result in x0 (just like the C library's printf). + WriteXRegister(0, result); + + // The printf parameters are inlined in the code, so skip them. + WritePc(instr->GetInstructionAtOffset(kPrintfLength)); + + // Set LR as if we'd just called a native printf function. + WriteLr(ReadPc()); + + delete[] format; +} + + +#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT +void Simulator::DoRuntimeCall(const Instruction* instr) { + VIXL_STATIC_ASSERT(kRuntimeCallAddressSize == sizeof(uintptr_t)); + // The appropriate `Simulator::SimulateRuntimeCall()` wrapper and the function + // to call are passed inlined in the assembly. 
+  uintptr_t call_wrapper_address =
+      Memory::Read<uintptr_t>(instr + kRuntimeCallWrapperOffset);
+  uintptr_t function_address =
+      Memory::Read<uintptr_t>(instr + kRuntimeCallFunctionOffset);
+  RuntimeCallType call_type = static_cast<RuntimeCallType>(
+      Memory::Read<uint32_t>(instr + kRuntimeCallTypeOffset));
+  auto runtime_call_wrapper =
+      reinterpret_cast<RuntimeCallWrapper>(call_wrapper_address);
+
+  if (call_type == kCallRuntime) {
+    WriteRegister(kLinkRegCode,
+                  instr->GetInstructionAtOffset(kRuntimeCallLength));
+  }
+  runtime_call_wrapper(this, function_address);
+  // Read the return address from `lr` and write it into `pc`.
+  WritePc(ReadRegister<Instruction*>(kLinkRegCode));
+}
+#else
+void Simulator::DoRuntimeCall(const Instruction* instr) {
+  USE(instr);
+  VIXL_UNREACHABLE();
+}
+#endif
+
+
+void Simulator::DoConfigureCPUFeatures(const Instruction* instr) {
+  VIXL_ASSERT(instr->Mask(ExceptionMask) == HLT);
+
+  typedef ConfigureCPUFeaturesElementType ElementType;
+  VIXL_ASSERT(CPUFeatures::kNumberOfFeatures <
+              std::numeric_limits<ElementType>::max());
+
+  // k{Set,Enable,Disable}CPUFeatures have the same parameter encoding.
+
+  size_t element_size = sizeof(ElementType);
+  size_t offset = kConfigureCPUFeaturesListOffset;
+
+  // Read the kNone-terminated list of features.
+ CPUFeatures parameters; + while (true) { + ElementType feature = Memory::Read(instr + offset); + offset += element_size; + if (feature == static_cast(CPUFeatures::kNone)) break; + parameters.Combine(static_cast(feature)); + } + + switch (instr->GetImmException()) { + case kSetCPUFeaturesOpcode: + SetCPUFeatures(parameters); + break; + case kEnableCPUFeaturesOpcode: + GetCPUFeatures()->Combine(parameters); + break; + case kDisableCPUFeaturesOpcode: + GetCPUFeatures()->Remove(parameters); + break; + default: + VIXL_UNREACHABLE(); + break; + } + + WritePc(instr->GetInstructionAtOffset(AlignUp(offset, kInstructionSize))); +} + + +void Simulator::DoSaveCPUFeatures(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->GetImmException() == kSaveCPUFeaturesOpcode)); + USE(instr); + + saved_cpu_features_.push_back(*GetCPUFeatures()); +} + + +void Simulator::DoRestoreCPUFeatures(const Instruction* instr) { + VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) && + (instr->GetImmException() == kRestoreCPUFeaturesOpcode)); + USE(instr); + + SetCPUFeatures(saved_cpu_features_.back()); + saved_cpu_features_.pop_back(); +} + + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.h new file mode 100644 index 00000000..d4080531 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-aarch64.h @@ -0,0 +1,3371 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_SIMULATOR_AARCH64_H_ +#define VIXL_AARCH64_SIMULATOR_AARCH64_H_ + +#include + +#include "../globals-vixl.h" +#include "../utils-vixl.h" + +#include "../cpu-features.h" +#include "abi-aarch64.h" +#include "cpu-features-auditor-aarch64.h" +#include "disasm-aarch64.h" +#include "instructions-aarch64.h" +#include "instrument-aarch64.h" +#include "simulator-constants-aarch64.h" + +//#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 +// These are only used for the ABI feature, and depend on checks performed for +// it. +#ifdef VIXL_HAS_ABI_SUPPORT +#include +#if __cplusplus >= 201402L +// Required for `std::index_sequence` +#include +#endif +#endif + +namespace vixl { +namespace aarch64 { + +// Representation of memory, with typed getters and setters for access. 
+class Memory { + public: + template + static T AddressUntag(T address) { + // Cast the address using a C-style cast. A reinterpret_cast would be + // appropriate, but it can't cast one integral type to another. + uint64_t bits = (uint64_t)address; + return (T)(bits & ~kAddressTagMask); + } + + template + static T Read(A address) { + T value; + address = AddressUntag(address); + VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); + memcpy(&value, reinterpret_cast(address), sizeof(value)); + return value; + } + + template + static void Write(A address, T value) { + address = AddressUntag(address); + VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8) || + (sizeof(value) == 16)); + memcpy(reinterpret_cast(address), &value, sizeof(value)); + } +}; + +// Represent a register (r0-r31, v0-v31). +template +class SimRegisterBase { + public: + SimRegisterBase() : written_since_last_log_(false) {} + + // Write the specified value. The value is zero-extended if necessary. + template + void Write(T new_value) { + if (sizeof(new_value) < kSizeInBytes) { + // All AArch64 registers are zero-extending. + memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value)); + } + WriteLane(new_value, 0); + NotifyRegisterWrite(); + } + template + VIXL_DEPRECATED("Write", void Set(T new_value)) { + Write(new_value); + } + + // Insert a typed value into a register, leaving the rest of the register + // unchanged. The lane parameter indicates where in the register the value + // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where + // 0 represents the least significant bits. + template + void Insert(int lane, T new_value) { + WriteLane(new_value, lane); + NotifyRegisterWrite(); + } + + // Get the value as the specified type. The value is truncated if necessary. 
+ template + T Get() const { + return GetLane(0); + } + + // Get the lane value as the specified type. The value is truncated if + // necessary. + template + T GetLane(int lane) const { + T result; + ReadLane(&result, lane); + return result; + } + template + VIXL_DEPRECATED("GetLane", T Get(int lane) const) { + return GetLane(lane); + } + + // TODO: Make this return a map of updated bytes, so that we can highlight + // updated lanes for load-and-insert. (That never happens for scalar code, but + // NEON has some instructions that can update individual lanes.) + bool WrittenSinceLastLog() const { return written_since_last_log_; } + + void NotifyRegisterLogged() { written_since_last_log_ = false; } + + protected: + uint8_t value_[kSizeInBytes]; + + // Helpers to aid with register tracing. + bool written_since_last_log_; + + void NotifyRegisterWrite() { written_since_last_log_ = true; } + + private: + template + void ReadLane(T* dst, int lane) const { + VIXL_ASSERT(lane >= 0); + VIXL_ASSERT((sizeof(*dst) + (lane * sizeof(*dst))) <= kSizeInBytes); + memcpy(dst, &value_[lane * sizeof(*dst)], sizeof(*dst)); + } + + template + void WriteLane(T src, int lane) { + VIXL_ASSERT(lane >= 0); + VIXL_ASSERT((sizeof(src) + (lane * sizeof(src))) <= kSizeInBytes); + memcpy(&value_[lane * sizeof(src)], &src, sizeof(src)); + } +}; +typedef SimRegisterBase SimRegister; // r0-r31 +typedef SimRegisterBase SimVRegister; // v0-v31 + +// The default ReadLane and WriteLane methods assume what we are copying is +// "trivially copyable" by using memcpy. We have to provide alternative +// implementations for SimFloat16 which cannot be copied this way. 
+ +template <> +template <> +inline void SimVRegister::ReadLane(vixl::internal::SimFloat16* dst, + int lane) const { + uint16_t rawbits; + ReadLane(&rawbits, lane); + *dst = RawbitsToFloat16(rawbits); +} + +template <> +template <> +inline void SimVRegister::WriteLane(vixl::internal::SimFloat16 src, int lane) { + WriteLane(Float16ToRawbits(src), lane); +} + +// Representation of a vector register, with typed getters and setters for lanes +// and additional information to represent lane state. +class LogicVRegister { + public: + inline LogicVRegister( + SimVRegister& other) // NOLINT(runtime/references)(runtime/explicit) + : register_(other) { + for (size_t i = 0; i < ArrayLength(saturated_); i++) { + saturated_[i] = kNotSaturated; + } + for (size_t i = 0; i < ArrayLength(round_); i++) { + round_[i] = 0; + } + } + + int64_t Int(VectorFormat vform, int index) const { + int64_t element; + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + element = register_.GetLane(index); + break; + case 16: + element = register_.GetLane(index); + break; + case 32: + element = register_.GetLane(index); + break; + case 64: + element = register_.GetLane(index); + break; + default: + VIXL_UNREACHABLE(); + return 0; + } + return element; + } + + uint64_t Uint(VectorFormat vform, int index) const { + uint64_t element; + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + element = register_.GetLane(index); + break; + case 16: + element = register_.GetLane(index); + break; + case 32: + element = register_.GetLane(index); + break; + case 64: + element = register_.GetLane(index); + break; + default: + VIXL_UNREACHABLE(); + return 0; + } + return element; + } + + uint64_t UintLeftJustified(VectorFormat vform, int index) const { + return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform)); + } + + int64_t IntLeftJustified(VectorFormat vform, int index) const { + uint64_t value = UintLeftJustified(vform, index); + int64_t result; + memcpy(&result, &value, sizeof(result)); + 
return result; + } + + void SetInt(VectorFormat vform, int index, int64_t value) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + register_.Insert(index, static_cast(value)); + break; + case 16: + register_.Insert(index, static_cast(value)); + break; + case 32: + register_.Insert(index, static_cast(value)); + break; + case 64: + register_.Insert(index, static_cast(value)); + break; + default: + VIXL_UNREACHABLE(); + return; + } + } + + void SetIntArray(VectorFormat vform, const int64_t* src) const { + ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetInt(vform, i, src[i]); + } + } + + void SetUint(VectorFormat vform, int index, uint64_t value) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + register_.Insert(index, static_cast(value)); + break; + case 16: + register_.Insert(index, static_cast(value)); + break; + case 32: + register_.Insert(index, static_cast(value)); + break; + case 64: + register_.Insert(index, static_cast(value)); + break; + default: + VIXL_UNREACHABLE(); + return; + } + } + + void SetUintArray(VectorFormat vform, const uint64_t* src) const { + ClearForWrite(vform); + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetUint(vform, i, src[i]); + } + } + + void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const { + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + register_.Insert(index, Memory::Read(addr)); + break; + case 16: + register_.Insert(index, Memory::Read(addr)); + break; + case 32: + register_.Insert(index, Memory::Read(addr)); + break; + case 64: + register_.Insert(index, Memory::Read(addr)); + break; + default: + VIXL_UNREACHABLE(); + return; + } + } + + void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const { + uint64_t value = Uint(vform, index); + switch (LaneSizeInBitsFromFormat(vform)) { + case 8: + Memory::Write(addr, static_cast(value)); + break; + case 16: + Memory::Write(addr, static_cast(value)); + break; + case 
32: + Memory::Write(addr, static_cast(value)); + break; + case 64: + Memory::Write(addr, value); + break; + } + } + + template + T Float(int index) const { + return register_.GetLane(index); + } + + template + void SetFloat(int index, T value) const { + register_.Insert(index, value); + } + + // When setting a result in a register of size less than Q, the top bits of + // the Q register must be cleared. + void ClearForWrite(VectorFormat vform) const { + unsigned size = RegisterSizeInBytesFromFormat(vform); + for (unsigned i = size; i < kQRegSizeInBytes; i++) { + SetUint(kFormat16B, i, 0); + } + } + + // Saturation state for each lane of a vector. + enum Saturation { + kNotSaturated = 0, + kSignedSatPositive = 1 << 0, + kSignedSatNegative = 1 << 1, + kSignedSatMask = kSignedSatPositive | kSignedSatNegative, + kSignedSatUndefined = kSignedSatMask, + kUnsignedSatPositive = 1 << 2, + kUnsignedSatNegative = 1 << 3, + kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative, + kUnsignedSatUndefined = kUnsignedSatMask + }; + + // Getters for saturation state. + Saturation GetSignedSaturation(int index) { + return static_cast(saturated_[index] & kSignedSatMask); + } + + Saturation GetUnsignedSaturation(int index) { + return static_cast(saturated_[index] & kUnsignedSatMask); + } + + // Setters for saturation state. + void ClearSat(int index) { saturated_[index] = kNotSaturated; } + + void SetSignedSat(int index, bool positive) { + SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative); + } + + void SetUnsignedSat(int index, bool positive) { + SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative); + } + + void SetSatFlag(int index, Saturation sat) { + saturated_[index] = static_cast(saturated_[index] | sat); + VIXL_ASSERT((sat & kUnsignedSatMask) != kUnsignedSatUndefined); + VIXL_ASSERT((sat & kSignedSatMask) != kSignedSatUndefined); + } + + // Saturate lanes of a vector based on saturation state. 
+ LogicVRegister& SignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetSignedSaturation(i); + if (sat == kSignedSatPositive) { + SetInt(vform, i, MaxIntFromFormat(vform)); + } else if (sat == kSignedSatNegative) { + SetInt(vform, i, MinIntFromFormat(vform)); + } + } + return *this; + } + + LogicVRegister& UnsignedSaturate(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + Saturation sat = GetUnsignedSaturation(i); + if (sat == kUnsignedSatPositive) { + SetUint(vform, i, MaxUintFromFormat(vform)); + } else if (sat == kUnsignedSatNegative) { + SetUint(vform, i, 0); + } + } + return *this; + } + + // Getter for rounding state. + bool GetRounding(int index) { return round_[index]; } + + // Setter for rounding state. + void SetRounding(int index, bool round) { round_[index] = round; } + + // Round lanes of a vector based on rounding state. + LogicVRegister& Round(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + SetUint(vform, i, Uint(vform, i) + (GetRounding(i) ? 1 : 0)); + } + return *this; + } + + // Unsigned halve lanes of a vector, and use the saturation state to set the + // top bit. + LogicVRegister& Uhalve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + uint64_t val = Uint(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetUnsignedSaturation(i) != kNotSaturated) { + // If the operation causes unsigned saturation, the bit shifted into the + // most significant bit must be set. + val |= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + // Signed halve lanes of a vector, and use the carry state to set the top bit. 
+ LogicVRegister& Halve(VectorFormat vform) { + for (int i = 0; i < LaneCountFromFormat(vform); i++) { + int64_t val = Int(vform, i); + SetRounding(i, (val & 1) == 1); + val >>= 1; + if (GetSignedSaturation(i) != kNotSaturated) { + // If the operation causes signed saturation, the sign bit must be + // inverted. + val ^= (MaxUintFromFormat(vform) >> 1) + 1; + } + SetInt(vform, i, val); + } + return *this; + } + + private: + SimVRegister& register_; + + // Allocate one saturation state entry per lane; largest register is type Q, + // and lanes can be a minimum of one byte wide. + Saturation saturated_[kQRegSizeInBytes]; + + // Allocate one rounding state entry per lane. + bool round_[kQRegSizeInBytes]; +}; + +// The proper way to initialize a simulated system register (such as NZCV) is as +// follows: +// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV); +class SimSystemRegister { + public: + // The default constructor represents a register which has no writable bits. + // It is not possible to set its value to anything other than 0. + SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) {} + + uint32_t GetRawValue() const { return value_; } + VIXL_DEPRECATED("GetRawValue", uint32_t RawValue() const) { + return GetRawValue(); + } + + void SetRawValue(uint32_t new_value) { + value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_); + } + + uint32_t ExtractBits(int msb, int lsb) const { + return ExtractUnsignedBitfield32(msb, lsb, value_); + } + VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) { + return ExtractBits(msb, lsb); + } + + int32_t ExtractSignedBits(int msb, int lsb) const { + return ExtractSignedBitfield32(msb, lsb, value_); + } + VIXL_DEPRECATED("ExtractSignedBits", + int32_t SignedBits(int msb, int lsb) const) { + return ExtractSignedBits(msb, lsb); + } + + void SetBits(int msb, int lsb, uint32_t bits); + + // Default system register values. 
+ static SimSystemRegister DefaultValueFor(SystemRegister id); + +#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \ + uint32_t Get##Name() const { return this->Func(HighBit, LowBit); } \ + VIXL_DEPRECATED("Get" #Name, uint32_t Name() const) { return Get##Name(); } \ + void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); } +#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \ + static const uint32_t Name##WriteIgnoreMask = ~static_cast(Mask); + + SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK) + +#undef DEFINE_ZERO_BITS +#undef DEFINE_GETTER + + protected: + // Most system registers only implement a few of the bits in the word. Other + // bits are "read-as-zero, write-ignored". The write_ignore_mask argument + // describes the bits which are not modifiable. + SimSystemRegister(uint32_t value, uint32_t write_ignore_mask) + : value_(value), write_ignore_mask_(write_ignore_mask) {} + + uint32_t value_; + uint32_t write_ignore_mask_; +}; + + +class SimExclusiveLocalMonitor { + public: + SimExclusiveLocalMonitor() : kSkipClearProbability(8), seed_(0x87654321) { + Clear(); + } + + // Clear the exclusive monitor (like clrex). + void Clear() { + address_ = 0; + size_ = 0; + } + + // Clear the exclusive monitor most of the time. + void MaybeClear() { + if ((seed_ % kSkipClearProbability) != 0) { + Clear(); + } + + // Advance seed_ using a simple linear congruential generator. + seed_ = (seed_ * 48271) % 2147483647; + } + + // Mark the address range for exclusive access (like load-exclusive). + void MarkExclusive(uint64_t address, size_t size) { + address_ = address; + size_ = size; + } + + // Return true if the address range is marked (like store-exclusive). + // This helper doesn't implicitly clear the monitor. + bool IsExclusive(uint64_t address, size_t size) { + VIXL_ASSERT(size > 0); + // Be pedantic: Require both the address and the size to match. 
+ return (size == size_) && (address == address_); + } + + private: + uint64_t address_; + size_t size_; + + const int kSkipClearProbability; + uint32_t seed_; +}; + + +// We can't accurate simulate the global monitor since it depends on external +// influences. Instead, this implementation occasionally causes accesses to +// fail, according to kPassProbability. +class SimExclusiveGlobalMonitor { + public: + SimExclusiveGlobalMonitor() : kPassProbability(8), seed_(0x87654321) {} + + bool IsExclusive(uint64_t address, size_t size) { + USE(address, size); + + bool pass = (seed_ % kPassProbability) != 0; + // Advance seed_ using a simple linear congruential generator. + seed_ = (seed_ * 48271) % 2147483647; + return pass; + } + + private: + const int kPassProbability; + uint32_t seed_; +}; + + +class Simulator : public DecoderVisitor { + public: + explicit Simulator(Decoder* decoder, FILE* stream = stdout); + ~Simulator(); + + void ResetState(); + + // Run the simulator. + virtual void Run(); + void RunFrom(const Instruction* first); + + +#if defined(VIXL_HAS_ABI_SUPPORT) && __cplusplus >= 201103L && \ + (defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1)) + // Templated `RunFrom` version taking care of passing arguments and returning + // the result value. + // This allows code like: + // int32_t res = simulator.RunFrom(GenerateCode(), + // 0x123); + // It requires VIXL's ABI features, and C++11 or greater. + // Also, the initialisation of tuples is incorrect in GCC before 4.9.1: + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51253 + template + R RunFrom(const Instruction* code, P... arguments) { + return RunFromStructHelper::Wrapper(this, code, arguments...); + } + + template + struct RunFromStructHelper { + static R Wrapper(Simulator* simulator, + const Instruction* code, + P... arguments) { + ABI abi; + std::tuple unused_tuple{ + // TODO: We currently do not support arguments passed on the stack. 
We + // could do so by using `WriteGenericOperand()` here, but may need to + // add features to handle situations where the stack is or is not set + // up. + (simulator->WriteCPURegister(abi.GetNextParameterGenericOperand

() + .GetCPURegister(), + arguments), + arguments)...}; + simulator->RunFrom(code); + return simulator->ReadGenericOperand(abi.GetReturnGenericOperand()); + } + }; + + // Partial specialization when the return type is `void`. + template + struct RunFromStructHelper { + static void Wrapper(Simulator* simulator, + const Instruction* code, + P... arguments) { + ABI abi; + std::tuple unused_tuple{ + // TODO: We currently do not support arguments passed on the stack. We + // could do so by using `WriteGenericOperand()` here, but may need to + // add features to handle situations where the stack is or is not set + // up. + (simulator->WriteCPURegister(abi.GetNextParameterGenericOperand

() + .GetCPURegister(), + arguments), + arguments)...}; + simulator->RunFrom(code); + } + }; +#endif + + // Execution ends when the PC hits this address. + static const Instruction* kEndOfSimAddress; + + // Simulation helpers. + const Instruction* ReadPc() const { return pc_; } + VIXL_DEPRECATED("ReadPc", const Instruction* pc() const) { return ReadPc(); } + + enum BranchLogMode { LogBranches, NoBranchLog }; + + void WritePc(const Instruction* new_pc, + BranchLogMode log_mode = LogBranches) { + if (log_mode == LogBranches) LogTakenBranch(new_pc); + pc_ = Memory::AddressUntag(new_pc); + pc_modified_ = true; + } + VIXL_DEPRECATED("WritePc", void set_pc(const Instruction* new_pc)) { + return WritePc(new_pc); + } + + void IncrementPc() { + if (!pc_modified_) { + pc_ = pc_->GetNextInstruction(); + } + } + VIXL_DEPRECATED("IncrementPc", void increment_pc()) { IncrementPc(); } + + BType ReadBType() const { return btype_; } + void WriteNextBType(BType btype) { next_btype_ = btype; } + void UpdateBType() { + btype_ = next_btype_; + next_btype_ = DefaultBType; + } + + // Helper function to determine BType for branches. + BType GetBTypeFromInstruction(const Instruction* instr) const; + + bool PcIsInGuardedPage() const { return guard_pages_; } + void SetGuardedPages(bool guard_pages) { guard_pages_ = guard_pages; } + + void ExecuteInstruction() { + // The program counter should always be aligned. + VIXL_ASSERT(IsWordAligned(pc_)); + pc_modified_ = false; + + // On guarded pages, if BType is not zero, take an exception on any + // instruction other than BTI, PACI[AB]SP, HLT or BRK. 
+ if (PcIsInGuardedPage() && (ReadBType() != DefaultBType)) { + if (pc_->IsPAuth()) { + Instr i = pc_->Mask(SystemPAuthMask); + if ((i != PACIASP) && (i != PACIBSP)) { + VIXL_ABORT_WITH_MSG( + "Executing non-BTI instruction with wrong BType."); + } + } else if (!pc_->IsBti() && !pc_->IsException()) { + VIXL_ABORT_WITH_MSG("Executing non-BTI instruction with wrong BType."); + } + } + + // decoder_->Decode(...) triggers at least the following visitors: + // 1. The CPUFeaturesAuditor (`cpu_features_auditor_`). + // 2. The PrintDisassembler (`print_disasm_`), if enabled. + // 3. The Simulator (`this`). + // User can add additional visitors at any point, but the Simulator requires + // that the ordering above is preserved. + decoder_->Decode(pc_); + IncrementPc(); + LogAllWrittenRegisters(); + UpdateBType(); + + VIXL_CHECK(cpu_features_auditor_.InstructionIsAvailable()); + } + +// Declare all Visitor functions. +#define DECLARE(A) \ + virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE; + VISITOR_LIST_THAT_RETURN(DECLARE) +#undef DECLARE + +#define DECLARE(A) \ + VIXL_DEBUG_NO_RETURN virtual void Visit##A(const Instruction* instr) \ + VIXL_OVERRIDE; + VISITOR_LIST_THAT_DONT_RETURN(DECLARE) +#undef DECLARE + + + // Integer register accessors. + + // Basic accessor: Read the register as the specified type. 
+ template + T ReadRegister(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const { + VIXL_ASSERT( + code < kNumberOfRegisters || + ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode))); + if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { + T result; + memset(&result, 0, sizeof(result)); + return result; + } + if ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode)) { + code = 31; + } + return registers_[code].Get(); + } + template + VIXL_DEPRECATED("ReadRegister", + T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) + const) { + return ReadRegister(code, r31mode); + } + + // Common specialized accessors for the ReadRegister() template. + int32_t ReadWRegister(unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + return ReadRegister(code, r31mode); + } + VIXL_DEPRECATED("ReadWRegister", + int32_t wreg(unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const) { + return ReadWRegister(code, r31mode); + } + + int64_t ReadXRegister(unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + return ReadRegister(code, r31mode); + } + VIXL_DEPRECATED("ReadXRegister", + int64_t xreg(unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const) { + return ReadXRegister(code, r31mode); + } + + // As above, with parameterized size and return type. The value is + // either zero-extended or truncated to fit, as required. + template + T ReadRegister(unsigned size, + unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + uint64_t raw; + switch (size) { + case kWRegSize: + raw = ReadRegister(code, r31mode); + break; + case kXRegSize: + raw = ReadRegister(code, r31mode); + break; + default: + VIXL_UNREACHABLE(); + return 0; + } + + T result; + VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw)); + // Copy the result and truncate to fit. This assumes a little-endian host. 
+ memcpy(&result, &raw, sizeof(result)); + return result; + } + template + VIXL_DEPRECATED("ReadRegister", + T reg(unsigned size, + unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const) { + return ReadRegister(size, code, r31mode); + } + + // Use int64_t by default if T is not specified. + int64_t ReadRegister(unsigned size, + unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const { + return ReadRegister(size, code, r31mode); + } + VIXL_DEPRECATED("ReadRegister", + int64_t reg(unsigned size, + unsigned code, + Reg31Mode r31mode = Reg31IsZeroRegister) const) { + return ReadRegister(size, code, r31mode); + } + + enum RegLogMode { LogRegWrites, NoRegLog }; + + // Write 'value' into an integer register. The value is zero-extended. This + // behaviour matches AArch64 register writes. + template + void WriteRegister(unsigned code, + T value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + if (sizeof(T) < kWRegSizeInBytes) { + // We use a C-style cast on purpose here. + // Since we do not have access to 'constepxr if', the casts in this `if` + // must be valid even if we know the code will never be executed, in + // particular when `T` is a pointer type. 
+ int64_t tmp_64bit = (int64_t)value; + int32_t tmp_32bit = static_cast(tmp_64bit); + WriteRegister(code, tmp_32bit, log_mode, r31mode); + return; + } + + VIXL_ASSERT((sizeof(T) == kWRegSizeInBytes) || + (sizeof(T) == kXRegSizeInBytes)); + VIXL_ASSERT( + code < kNumberOfRegisters || + ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode))); + + if ((code == 31) && (r31mode == Reg31IsZeroRegister)) { + return; + } + + if ((r31mode == Reg31IsZeroRegister) && (code == kSPRegInternalCode)) { + code = 31; + } + + registers_[code].Write(value); + + if (log_mode == LogRegWrites) LogRegister(code, r31mode); + } + template + VIXL_DEPRECATED("WriteRegister", + void set_reg(unsigned code, + T value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister)) { + WriteRegister(code, value, log_mode, r31mode); + } + + // Common specialized accessors for the set_reg() template. + void WriteWRegister(unsigned code, + int32_t value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + WriteRegister(code, value, log_mode, r31mode); + } + VIXL_DEPRECATED("WriteWRegister", + void set_wreg(unsigned code, + int32_t value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister)) { + WriteWRegister(code, value, log_mode, r31mode); + } + + void WriteXRegister(unsigned code, + int64_t value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + WriteRegister(code, value, log_mode, r31mode); + } + VIXL_DEPRECATED("WriteXRegister", + void set_xreg(unsigned code, + int64_t value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister)) { + WriteXRegister(code, value, log_mode, r31mode); + } + + // As above, with parameterized size and type. The value is either + // zero-extended or truncated to fit, as required. 
+ template + void WriteRegister(unsigned size, + unsigned code, + T value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister) { + // Zero-extend the input. + uint64_t raw = 0; + VIXL_STATIC_ASSERT(sizeof(value) <= sizeof(raw)); + memcpy(&raw, &value, sizeof(value)); + + // Write (and possibly truncate) the value. + switch (size) { + case kWRegSize: + WriteRegister(code, static_cast(raw), log_mode, r31mode); + break; + case kXRegSize: + WriteRegister(code, raw, log_mode, r31mode); + break; + default: + VIXL_UNREACHABLE(); + return; + } + } + template + VIXL_DEPRECATED("WriteRegister", + void set_reg(unsigned size, + unsigned code, + T value, + RegLogMode log_mode = LogRegWrites, + Reg31Mode r31mode = Reg31IsZeroRegister)) { + WriteRegister(size, code, value, log_mode, r31mode); + } + + // Common specialized accessors for the set_reg() template. + + // Commonly-used special cases. + template + void WriteLr(T value) { + WriteRegister(kLinkRegCode, value); + } + template + VIXL_DEPRECATED("WriteLr", void set_lr(T value)) { + WriteLr(value); + } + + template + void WriteSp(T value) { + WriteRegister(31, value, LogRegWrites, Reg31IsStackPointer); + } + template + VIXL_DEPRECATED("WriteSp", void set_sp(T value)) { + WriteSp(value); + } + + // Vector register accessors. + // These are equivalent to the integer register accessors, but for vector + // registers. + + // A structure for representing a 128-bit Q register. + struct qreg_t { + uint8_t val[kQRegSizeInBytes]; + }; + + // Basic accessor: read the register as the specified type. 
+ template + T ReadVRegister(unsigned code) const { + VIXL_STATIC_ASSERT( + (sizeof(T) == kBRegSizeInBytes) || (sizeof(T) == kHRegSizeInBytes) || + (sizeof(T) == kSRegSizeInBytes) || (sizeof(T) == kDRegSizeInBytes) || + (sizeof(T) == kQRegSizeInBytes)); + VIXL_ASSERT(code < kNumberOfVRegisters); + + return vregisters_[code].Get(); + } + template + VIXL_DEPRECATED("ReadVRegister", T vreg(unsigned code) const) { + return ReadVRegister(code); + } + + // Common specialized accessors for the vreg() template. + int8_t ReadBRegister(unsigned code) const { + return ReadVRegister(code); + } + VIXL_DEPRECATED("ReadBRegister", int8_t breg(unsigned code) const) { + return ReadBRegister(code); + } + + vixl::internal::SimFloat16 ReadHRegister(unsigned code) const { + return RawbitsToFloat16(ReadHRegisterBits(code)); + } + VIXL_DEPRECATED("ReadHRegister", int16_t hreg(unsigned code) const) { + return Float16ToRawbits(ReadHRegister(code)); + } + + uint16_t ReadHRegisterBits(unsigned code) const { + return ReadVRegister(code); + } + + float ReadSRegister(unsigned code) const { + return ReadVRegister(code); + } + VIXL_DEPRECATED("ReadSRegister", float sreg(unsigned code) const) { + return ReadSRegister(code); + } + + uint32_t ReadSRegisterBits(unsigned code) const { + return ReadVRegister(code); + } + VIXL_DEPRECATED("ReadSRegisterBits", + uint32_t sreg_bits(unsigned code) const) { + return ReadSRegisterBits(code); + } + + double ReadDRegister(unsigned code) const { + return ReadVRegister(code); + } + VIXL_DEPRECATED("ReadDRegister", double dreg(unsigned code) const) { + return ReadDRegister(code); + } + + uint64_t ReadDRegisterBits(unsigned code) const { + return ReadVRegister(code); + } + VIXL_DEPRECATED("ReadDRegisterBits", + uint64_t dreg_bits(unsigned code) const) { + return ReadDRegisterBits(code); + } + + qreg_t ReadQRegister(unsigned code) const { + return ReadVRegister(code); + } + VIXL_DEPRECATED("ReadQRegister", qreg_t qreg(unsigned code) const) { + return 
ReadQRegister(code); + } + + // As above, with parameterized size and return type. The value is + // either zero-extended or truncated to fit, as required. + template + T ReadVRegister(unsigned size, unsigned code) const { + uint64_t raw = 0; + T result; + + switch (size) { + case kSRegSize: + raw = ReadVRegister(code); + break; + case kDRegSize: + raw = ReadVRegister(code); + break; + default: + VIXL_UNREACHABLE(); + break; + } + + VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw)); + // Copy the result and truncate to fit. This assumes a little-endian host. + memcpy(&result, &raw, sizeof(result)); + return result; + } + template + VIXL_DEPRECATED("ReadVRegister", T vreg(unsigned size, unsigned code) const) { + return ReadVRegister(size, code); + } + + SimVRegister& ReadVRegister(unsigned code) { return vregisters_[code]; } + VIXL_DEPRECATED("ReadVRegister", SimVRegister& vreg(unsigned code)) { + return ReadVRegister(code); + } + + // Basic accessor: Write the specified value. + template + void WriteVRegister(unsigned code, + T value, + RegLogMode log_mode = LogRegWrites) { + VIXL_STATIC_ASSERT((sizeof(value) == kBRegSizeInBytes) || + (sizeof(value) == kHRegSizeInBytes) || + (sizeof(value) == kSRegSizeInBytes) || + (sizeof(value) == kDRegSizeInBytes) || + (sizeof(value) == kQRegSizeInBytes)); + VIXL_ASSERT(code < kNumberOfVRegisters); + vregisters_[code].Write(value); + + if (log_mode == LogRegWrites) { + LogVRegister(code, GetPrintRegisterFormat(value)); + } + } + template + VIXL_DEPRECATED("WriteVRegister", + void set_vreg(unsigned code, + T value, + RegLogMode log_mode = LogRegWrites)) { + WriteVRegister(code, value, log_mode); + } + + // Common specialized accessors for the WriteVRegister() template. 
+ void WriteBRegister(unsigned code, + int8_t value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, value, log_mode); + } + VIXL_DEPRECATED("WriteBRegister", + void set_breg(unsigned code, + int8_t value, + RegLogMode log_mode = LogRegWrites)) { + return WriteBRegister(code, value, log_mode); + } + + void WriteHRegister(unsigned code, + vixl::internal::SimFloat16 value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, Float16ToRawbits(value), log_mode); + } + + void WriteHRegister(unsigned code, + int16_t value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, value, log_mode); + } + VIXL_DEPRECATED("WriteHRegister", + void set_hreg(unsigned code, + int16_t value, + RegLogMode log_mode = LogRegWrites)) { + return WriteHRegister(code, value, log_mode); + } + + void WriteSRegister(unsigned code, + float value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, value, log_mode); + } + VIXL_DEPRECATED("WriteSRegister", + void set_sreg(unsigned code, + float value, + RegLogMode log_mode = LogRegWrites)) { + WriteSRegister(code, value, log_mode); + } + + void WriteSRegisterBits(unsigned code, + uint32_t value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, value, log_mode); + } + VIXL_DEPRECATED("WriteSRegisterBits", + void set_sreg_bits(unsigned code, + uint32_t value, + RegLogMode log_mode = LogRegWrites)) { + WriteSRegisterBits(code, value, log_mode); + } + + void WriteDRegister(unsigned code, + double value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, value, log_mode); + } + VIXL_DEPRECATED("WriteDRegister", + void set_dreg(unsigned code, + double value, + RegLogMode log_mode = LogRegWrites)) { + WriteDRegister(code, value, log_mode); + } + + void WriteDRegisterBits(unsigned code, + uint64_t value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, value, log_mode); + } + VIXL_DEPRECATED("WriteDRegisterBits", + void set_dreg_bits(unsigned code, + uint64_t value, + 
RegLogMode log_mode = LogRegWrites)) { + WriteDRegisterBits(code, value, log_mode); + } + + void WriteQRegister(unsigned code, + qreg_t value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(code, value, log_mode); + } + VIXL_DEPRECATED("WriteQRegister", + void set_qreg(unsigned code, + qreg_t value, + RegLogMode log_mode = LogRegWrites)) { + WriteQRegister(code, value, log_mode); + } + + template + T ReadRegister(Register reg) const { + return ReadRegister(reg.GetCode(), Reg31IsZeroRegister); + } + + template + void WriteRegister(Register reg, + T value, + RegLogMode log_mode = LogRegWrites) { + WriteRegister(reg.GetCode(), value, log_mode, Reg31IsZeroRegister); + } + + template + T ReadVRegister(VRegister vreg) const { + return ReadVRegister(vreg.GetCode()); + } + + template + void WriteVRegister(VRegister vreg, + T value, + RegLogMode log_mode = LogRegWrites) { + WriteVRegister(vreg.GetCode(), value, log_mode); + } + + template + T ReadCPURegister(CPURegister reg) const { + if (reg.IsVRegister()) { + return ReadVRegister(VRegister(reg)); + } else { + return ReadRegister(Register(reg)); + } + } + + template + void WriteCPURegister(CPURegister reg, + T value, + RegLogMode log_mode = LogRegWrites) { + if (reg.IsVRegister()) { + WriteVRegister(VRegister(reg), value, log_mode); + } else { + WriteRegister(Register(reg), value, log_mode); + } + } + + uint64_t ComputeMemOperandAddress(const MemOperand& mem_op) const; + + template + T ReadGenericOperand(GenericOperand operand) const { + if (operand.IsCPURegister()) { + return ReadCPURegister(operand.GetCPURegister()); + } else { + VIXL_ASSERT(operand.IsMemOperand()); + return Memory::Read(ComputeMemOperandAddress(operand.GetMemOperand())); + } + } + + template + void WriteGenericOperand(GenericOperand operand, + T value, + RegLogMode log_mode = LogRegWrites) { + if (operand.IsCPURegister()) { + // Outside SIMD, registers are 64-bit or a subset of a 64-bit register. 
If + // the width of the value to write is smaller than 64 bits, the unused + // bits may contain unrelated values that the code following this write + // needs to handle gracefully. + // Here we fill the unused bits with a predefined pattern to catch issues + // early. + VIXL_ASSERT(operand.GetCPURegister().GetSizeInBits() <= 64); + uint64_t raw = 0xdeadda1adeadda1a; + memcpy(&raw, &value, sizeof(value)); + WriteCPURegister(operand.GetCPURegister(), raw, log_mode); + } else { + VIXL_ASSERT(operand.IsMemOperand()); + Memory::Write(ComputeMemOperandAddress(operand.GetMemOperand()), value); + } + } + + bool ReadN() const { return nzcv_.GetN() != 0; } + VIXL_DEPRECATED("ReadN", bool N() const) { return ReadN(); } + + bool ReadZ() const { return nzcv_.GetZ() != 0; } + VIXL_DEPRECATED("ReadZ", bool Z() const) { return ReadZ(); } + + bool ReadC() const { return nzcv_.GetC() != 0; } + VIXL_DEPRECATED("ReadC", bool C() const) { return ReadC(); } + + bool ReadV() const { return nzcv_.GetV() != 0; } + VIXL_DEPRECATED("ReadV", bool V() const) { return ReadV(); } + + SimSystemRegister& ReadNzcv() { return nzcv_; } + VIXL_DEPRECATED("ReadNzcv", SimSystemRegister& nzcv()) { return ReadNzcv(); } + + // TODO: Find a way to make the fpcr_ members return the proper types, so + // these accessors are not necessary. + FPRounding ReadRMode() const { + return static_cast(fpcr_.GetRMode()); + } + VIXL_DEPRECATED("ReadRMode", FPRounding RMode()) { return ReadRMode(); } + + UseDefaultNaN ReadDN() const { + return fpcr_.GetDN() != 0 ? kUseDefaultNaN : kIgnoreDefaultNaN; + } + + VIXL_DEPRECATED("ReadDN", bool DN()) { + return ReadDN() == kUseDefaultNaN ? true : false; + } + + SimSystemRegister& ReadFpcr() { return fpcr_; } + VIXL_DEPRECATED("ReadFpcr", SimSystemRegister& fpcr()) { return ReadFpcr(); } + + // Specify relevant register formats for Print(V)Register and related helpers. + enum PrintRegisterFormat { + // The lane size. 
+ kPrintRegLaneSizeB = 0 << 0, + kPrintRegLaneSizeH = 1 << 0, + kPrintRegLaneSizeS = 2 << 0, + kPrintRegLaneSizeW = kPrintRegLaneSizeS, + kPrintRegLaneSizeD = 3 << 0, + kPrintRegLaneSizeX = kPrintRegLaneSizeD, + kPrintRegLaneSizeQ = 4 << 0, + + kPrintRegLaneSizeOffset = 0, + kPrintRegLaneSizeMask = 7 << 0, + + // The lane count. + kPrintRegAsScalar = 0, + kPrintRegAsDVector = 1 << 3, + kPrintRegAsQVector = 2 << 3, + + kPrintRegAsVectorMask = 3 << 3, + + // Indicate floating-point format lanes. (This flag is only supported for + // S-, H-, and D-sized lanes.) + kPrintRegAsFP = 1 << 5, + + // Supported combinations. + + kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar, + kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar, + kPrintHReg = kPrintRegLaneSizeH | kPrintRegAsScalar | kPrintRegAsFP, + kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + + kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar, + kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector, + kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector, + kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar, + kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector, + kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector, + kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar, + kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector, + kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector, + kPrintReg1HFP = kPrintRegLaneSizeH | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg4HFP = kPrintRegLaneSizeH | kPrintRegAsDVector | kPrintRegAsFP, + kPrintReg8HFP = kPrintRegLaneSizeH | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP, + kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar, + kPrintReg2D = kPrintRegLaneSizeD 
| kPrintRegAsQVector, + kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP, + kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP, + kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar + }; + + unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) { + return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset; + } + + unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) { + return 1 << GetPrintRegLaneSizeInBytesLog2(format); + } + + unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) { + if (format & kPrintRegAsDVector) return kDRegSizeInBytesLog2; + if (format & kPrintRegAsQVector) return kQRegSizeInBytesLog2; + + // Scalar types. + return GetPrintRegLaneSizeInBytesLog2(format); + } + + unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) { + return 1 << GetPrintRegSizeInBytesLog2(format); + } + + unsigned GetPrintRegLaneCount(PrintRegisterFormat format) { + unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format); + unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format); + VIXL_ASSERT(reg_size_log2 >= lane_size_log2); + return 1 << (reg_size_log2 - lane_size_log2); + } + + PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned reg_size, + unsigned lane_size); + + PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned size) { + return GetPrintRegisterFormatForSize(size, size); + } + + PrintRegisterFormat GetPrintRegisterFormatForSizeFP(unsigned size) { + switch (size) { + default: + VIXL_UNREACHABLE(); + return kPrintDReg; + case kDRegSizeInBytes: + return kPrintDReg; + case kSRegSizeInBytes: + return kPrintSReg; + case kHRegSizeInBytes: + return kPrintHReg; + } + } + + PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) { + if ((GetPrintRegLaneSizeInBytes(format) == kHRegSizeInBytes) || + (GetPrintRegLaneSizeInBytes(format) == kSRegSizeInBytes) || + (GetPrintRegLaneSizeInBytes(format) == kDRegSizeInBytes)) { + return 
static_cast(format | kPrintRegAsFP); + } + return format; + } + + template + PrintRegisterFormat GetPrintRegisterFormat(T value) { + return GetPrintRegisterFormatForSize(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(double value) { + VIXL_STATIC_ASSERT(sizeof(value) == kDRegSizeInBytes); + return GetPrintRegisterFormatForSizeFP(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(float value) { + VIXL_STATIC_ASSERT(sizeof(value) == kSRegSizeInBytes); + return GetPrintRegisterFormatForSizeFP(sizeof(value)); + } + + PrintRegisterFormat GetPrintRegisterFormat(Float16 value) { + VIXL_STATIC_ASSERT(sizeof(Float16ToRawbits(value)) == kHRegSizeInBytes); + return GetPrintRegisterFormatForSizeFP(sizeof(Float16ToRawbits(value))); + } + + PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform); + PrintRegisterFormat GetPrintRegisterFormatFP(VectorFormat vform); + + // Print all registers of the specified types. + void PrintRegisters(); + void PrintVRegisters(); + void PrintSystemRegisters(); + + // As above, but only print the registers that have been updated. + void PrintWrittenRegisters(); + void PrintWrittenVRegisters(); + + // As above, but respect LOG_REG and LOG_VREG. + void LogWrittenRegisters() { + if (GetTraceParameters() & LOG_REGS) PrintWrittenRegisters(); + } + void LogWrittenVRegisters() { + if (GetTraceParameters() & LOG_VREGS) PrintWrittenVRegisters(); + } + void LogAllWrittenRegisters() { + LogWrittenRegisters(); + LogWrittenVRegisters(); + } + + // Print individual register values (after update). + void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer); + void PrintVRegister(unsigned code, PrintRegisterFormat format); + void PrintSystemRegister(SystemRegister id); + void PrintTakenBranch(const Instruction* target); + + // Like Print* (above), but respect GetTraceParameters(). 
+ void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) { + if (GetTraceParameters() & LOG_REGS) PrintRegister(code, r31mode); + } + void LogVRegister(unsigned code, PrintRegisterFormat format) { + if (GetTraceParameters() & LOG_VREGS) PrintVRegister(code, format); + } + void LogSystemRegister(SystemRegister id) { + if (GetTraceParameters() & LOG_SYSREGS) PrintSystemRegister(id); + } + void LogTakenBranch(const Instruction* target) { + if (GetTraceParameters() & LOG_BRANCH) PrintTakenBranch(target); + } + + // Print memory accesses. + void PrintRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format); + void PrintWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format); + void PrintVRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane); + void PrintVWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane); + + // Like Print* (above), but respect GetTraceParameters(). + void LogRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + if (GetTraceParameters() & LOG_REGS) PrintRead(address, reg_code, format); + } + void LogWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format) { + if (GetTraceParameters() & LOG_WRITE) PrintWrite(address, reg_code, format); + } + void LogVRead(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane = 0) { + if (GetTraceParameters() & LOG_VREGS) { + PrintVRead(address, reg_code, format, lane); + } + } + void LogVWrite(uintptr_t address, + unsigned reg_code, + PrintRegisterFormat format, + unsigned lane = 0) { + if (GetTraceParameters() & LOG_WRITE) { + PrintVWrite(address, reg_code, format, lane); + } + } + + // Helper functions for register tracing. 
+ void PrintRegisterRawHelper(unsigned code, + Reg31Mode r31mode, + int size_in_bytes = kXRegSizeInBytes); + void PrintVRegisterRawHelper(unsigned code, + int bytes = kQRegSizeInBytes, + int lsb = 0); + void PrintVRegisterFPHelper(unsigned code, + unsigned lane_size_in_bytes, + int lane_count = 1, + int rightmost_lane = 0); + + VIXL_NO_RETURN void DoUnreachable(const Instruction* instr); + void DoTrace(const Instruction* instr); + void DoLog(const Instruction* instr); + + static const char* WRegNameForCode(unsigned code, + Reg31Mode mode = Reg31IsZeroRegister); + static const char* XRegNameForCode(unsigned code, + Reg31Mode mode = Reg31IsZeroRegister); + static const char* HRegNameForCode(unsigned code); + static const char* SRegNameForCode(unsigned code); + static const char* DRegNameForCode(unsigned code); + static const char* VRegNameForCode(unsigned code); + + bool IsColouredTrace() const { return coloured_trace_; } + VIXL_DEPRECATED("IsColouredTrace", bool coloured_trace() const) { + return IsColouredTrace(); + } + + void SetColouredTrace(bool value); + VIXL_DEPRECATED("SetColouredTrace", void set_coloured_trace(bool value)) { + SetColouredTrace(value); + } + + // Values for traces parameters defined in simulator-constants-aarch64.h in + // enum TraceParameters. + int GetTraceParameters() const { return trace_parameters_; } + VIXL_DEPRECATED("GetTraceParameters", int trace_parameters() const) { + return GetTraceParameters(); + } + + void SetTraceParameters(int parameters); + VIXL_DEPRECATED("SetTraceParameters", + void set_trace_parameters(int parameters)) { + SetTraceParameters(parameters); + } + + void SetInstructionStats(bool value); + VIXL_DEPRECATED("SetInstructionStats", + void set_instruction_stats(bool value)) { + SetInstructionStats(value); + } + + // Clear the simulated local monitor to force the next store-exclusive + // instruction to fail. 
+ void ClearLocalMonitor() { local_monitor_.Clear(); } + + void SilenceExclusiveAccessWarning() { + print_exclusive_access_warning_ = false; + } + + void CheckIsValidUnalignedAtomicAccess(int rn, + uint64_t address, + unsigned access_size) { + // Verify that the address is available to the host. + VIXL_ASSERT(address == static_cast(address)); + + if (GetCPUFeatures()->Has(CPUFeatures::kUSCAT)) { + // Check that the access falls entirely within one atomic access granule. + if (AlignDown(address, kAtomicAccessGranule) != + AlignDown(address + access_size - 1, kAtomicAccessGranule)) { + VIXL_ALIGNMENT_EXCEPTION(); + } + } else { + // Check that the access is aligned. + if (AlignDown(address, access_size) != address) { + VIXL_ALIGNMENT_EXCEPTION(); + } + } + + // The sp must be aligned to 16 bytes when it is accessed. + if ((rn == kSpRegCode) && (AlignDown(address, 16) != address)) { + VIXL_ALIGNMENT_EXCEPTION(); + } + } + + enum PointerType { kDataPointer, kInstructionPointer }; + + struct PACKey { + uint64_t high; + uint64_t low; + int number; + }; + + // Current implementation is that all pointers are tagged. + bool HasTBI(uint64_t ptr, PointerType type) { + USE(ptr, type); + return true; + } + + // Current implementation uses 48-bit virtual addresses. + int GetBottomPACBit(uint64_t ptr, int ttbr) { + USE(ptr, ttbr); + VIXL_ASSERT((ttbr == 0) || (ttbr == 1)); + return 48; + } + + // The top PAC bit is 55 for the purposes of relative bit fields with TBI, + // however bit 55 is the TTBR bit regardless of TBI so isn't part of the PAC + // codes in pointers. + int GetTopPACBit(uint64_t ptr, PointerType type) { + return HasTBI(ptr, type) ? 55 : 63; + } + + // Armv8.3 Pointer authentication helpers. 
+ uint64_t CalculatePACMask(uint64_t ptr, PointerType type, int ext_bit); + uint64_t ComputePAC(uint64_t data, uint64_t context, PACKey key); + uint64_t AuthPAC(uint64_t ptr, + uint64_t context, + PACKey key, + PointerType type); + uint64_t AddPAC(uint64_t ptr, uint64_t context, PACKey key, PointerType type); + uint64_t StripPAC(uint64_t ptr, PointerType type); + + // The common CPUFeatures interface with the set of available features. + + CPUFeatures* GetCPUFeatures() { + return cpu_features_auditor_.GetCPUFeatures(); + } + + void SetCPUFeatures(const CPUFeatures& cpu_features) { + cpu_features_auditor_.SetCPUFeatures(cpu_features); + } + + // The set of features that the simulator has encountered. + const CPUFeatures& GetSeenFeatures() { + return cpu_features_auditor_.GetSeenFeatures(); + } + void ResetSeenFeatures() { cpu_features_auditor_.ResetSeenFeatures(); } + +// Runtime call emulation support. +// It requires VIXL's ABI features, and C++11 or greater. +// Also, the initialisation of the tuples in RuntimeCall(Non)Void is incorrect +// in GCC before 4.9.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51253 +#if defined(VIXL_HAS_ABI_SUPPORT) && __cplusplus >= 201103L && \ + (defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1)) + +#define VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT + +// The implementation of the runtime call helpers require the functionality +// provided by `std::index_sequence`. It is only available from C++14, but +// we want runtime call simulation to work from C++11, so we emulate if +// necessary. +#if __cplusplus >= 201402L + template + using local_index_sequence = std::index_sequence; + template + using __local_index_sequence_for = std::index_sequence_for; +#else + // Emulate the behaviour of `std::index_sequence` and + // `std::index_sequence_for`. + // Naming follow the `std` names, prefixed with `emulated_`. + template + struct emulated_index_sequence {}; + + // A recursive template to create a sequence of indexes. 
+ // The base case (for `N == 0`) is declared outside of the class scope, as + // required by C++. + template + struct emulated_make_index_sequence_helper + : emulated_make_index_sequence_helper {}; + + template + struct emulated_make_index_sequence : emulated_make_index_sequence_helper { + }; + + template + struct emulated_index_sequence_for + : emulated_make_index_sequence {}; + + template + using local_index_sequence = emulated_index_sequence; + template + using __local_index_sequence_for = emulated_index_sequence_for; +#endif + + // Expand the argument tuple and perform the call. + template + R DoRuntimeCall(R (*function)(P...), + std::tuple arguments, + local_index_sequence) { + return function(std::get(arguments)...); + } + + template + void RuntimeCallNonVoid(R (*function)(P...)) { + ABI abi; + std::tuple argument_operands{ + ReadGenericOperand

(abi.GetNextParameterGenericOperand

())...}; + R return_value = DoRuntimeCall(function, + argument_operands, + __local_index_sequence_for{}); + WriteGenericOperand(abi.GetReturnGenericOperand(), return_value); + } + + template + void RuntimeCallVoid(R (*function)(P...)) { + ABI abi; + std::tuple argument_operands{ + ReadGenericOperand

(abi.GetNextParameterGenericOperand

())...}; + DoRuntimeCall(function, + argument_operands, + __local_index_sequence_for{}); + } + + // We use `struct` for `void` return type specialisation. + template + struct RuntimeCallStructHelper { + static void Wrapper(Simulator* simulator, uintptr_t function_pointer) { + R (*function)(P...) = reinterpret_cast(function_pointer); + simulator->RuntimeCallNonVoid(function); + } + }; + + // Partial specialization when the return type is `void`. + template + struct RuntimeCallStructHelper { + static void Wrapper(Simulator* simulator, uintptr_t function_pointer) { + void (*function)(P...) = + reinterpret_cast(function_pointer); + simulator->RuntimeCallVoid(function); + } + }; +#endif + + protected: + const char* clr_normal; + const char* clr_flag_name; + const char* clr_flag_value; + const char* clr_reg_name; + const char* clr_reg_value; + const char* clr_vreg_name; + const char* clr_vreg_value; + const char* clr_memory_address; + const char* clr_warning; + const char* clr_warning_message; + const char* clr_printf; + const char* clr_branch_marker; + + // Simulation helpers ------------------------------------ + bool ConditionPassed(Condition cond) { + switch (cond) { + case eq: + return ReadZ(); + case ne: + return !ReadZ(); + case hs: + return ReadC(); + case lo: + return !ReadC(); + case mi: + return ReadN(); + case pl: + return !ReadN(); + case vs: + return ReadV(); + case vc: + return !ReadV(); + case hi: + return ReadC() && !ReadZ(); + case ls: + return !(ReadC() && !ReadZ()); + case ge: + return ReadN() == ReadV(); + case lt: + return ReadN() != ReadV(); + case gt: + return !ReadZ() && (ReadN() == ReadV()); + case le: + return !(!ReadZ() && (ReadN() == ReadV())); + case nv: + VIXL_FALLTHROUGH(); + case al: + return true; + default: + VIXL_UNREACHABLE(); + return false; + } + } + + bool ConditionPassed(Instr cond) { + return ConditionPassed(static_cast(cond)); + } + + bool ConditionFailed(Condition cond) { return !ConditionPassed(cond); } + + void 
AddSubHelper(const Instruction* instr, int64_t op2); + uint64_t AddWithCarry(unsigned reg_size, + bool set_flags, + uint64_t left, + uint64_t right, + int carry_in = 0); + void LogicalHelper(const Instruction* instr, int64_t op2); + void ConditionalCompareHelper(const Instruction* instr, int64_t op2); + void LoadStoreHelper(const Instruction* instr, + int64_t offset, + AddrMode addrmode); + void LoadStorePairHelper(const Instruction* instr, AddrMode addrmode); + template + void CompareAndSwapHelper(const Instruction* instr); + template + void CompareAndSwapPairHelper(const Instruction* instr); + template + void AtomicMemorySimpleHelper(const Instruction* instr); + template + void AtomicMemorySwapHelper(const Instruction* instr); + template + void LoadAcquireRCpcHelper(const Instruction* instr); + template + void LoadAcquireRCpcUnscaledOffsetHelper(const Instruction* instr); + template + void StoreReleaseUnscaledOffsetHelper(const Instruction* instr); + uintptr_t AddressModeHelper(unsigned addr_reg, + int64_t offset, + AddrMode addrmode); + void NEONLoadStoreMultiStructHelper(const Instruction* instr, + AddrMode addr_mode); + void NEONLoadStoreSingleStructHelper(const Instruction* instr, + AddrMode addr_mode); + + uint64_t AddressUntag(uint64_t address) { return address & ~kAddressTagMask; } + + template + T* AddressUntag(T* address) { + uintptr_t address_raw = reinterpret_cast(address); + return reinterpret_cast(AddressUntag(address_raw)); + } + + int64_t ShiftOperand(unsigned reg_size, + int64_t value, + Shift shift_type, + unsigned amount) const; + int64_t ExtendValue(unsigned reg_width, + int64_t value, + Extend extend_type, + unsigned left_shift = 0) const; + uint16_t PolynomialMult(uint8_t op1, uint8_t op2) const; + + void ld1(VectorFormat vform, LogicVRegister dst, uint64_t addr); + void ld1(VectorFormat vform, LogicVRegister dst, int index, uint64_t addr); + void ld1r(VectorFormat vform, LogicVRegister dst, uint64_t addr); + void ld2(VectorFormat vform, + 
LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr); + void ld2(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + int index, + uint64_t addr); + void ld2r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + uint64_t addr); + void ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr); + void ld3(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + int index, + uint64_t addr); + void ld3r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + uint64_t addr); + void ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr); + void ld4(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + int index, + uint64_t addr); + void ld4r(VectorFormat vform, + LogicVRegister dst1, + LogicVRegister dst2, + LogicVRegister dst3, + LogicVRegister dst4, + uint64_t addr); + void st1(VectorFormat vform, LogicVRegister src, uint64_t addr); + void st1(VectorFormat vform, LogicVRegister src, int index, uint64_t addr); + void st2(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + uint64_t addr); + void st2(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + int index, + uint64_t addr); + void st3(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + uint64_t addr); + void st3(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + int index, + uint64_t addr); + void st4(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + LogicVRegister src4, + uint64_t addr); + void st4(VectorFormat vform, + LogicVRegister src, + LogicVRegister src2, + LogicVRegister src3, + LogicVRegister src4, + int index, + uint64_t addr); + LogicVRegister cmp(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister cmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + int imm, + Condition cond); + LogicVRegister cmptst(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister add(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister mul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister mla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister mls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister pmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + + typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmlal(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister fmulx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& 
src1, + const LogicVRegister& src2, + int index); + LogicVRegister smlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister umlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmull(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmull2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sqrdmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister udot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& 
src2, + int index); + LogicVRegister sqrdmlsh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + LogicVRegister sub(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister and_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister orn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister eor(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bic(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm); + LogicVRegister bif(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister bsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister cls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister clz(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister cnt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister not_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rbit(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int revSize); + LogicVRegister rev16(VectorFormat 
vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev32(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister rev64(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister addlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool is_signed, + bool do_accumulate); + LogicVRegister saddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uaddlp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uadalp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ext(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index); + template + LogicVRegister fcadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot); + LogicVRegister fcadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot); + template + LogicVRegister fcmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index, + int rot); + LogicVRegister fcmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int index, + int rot); + template + LogicVRegister fcmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot); + LogicVRegister fcmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + int rot); + LogicVRegister ins_element(VectorFormat vform, + LogicVRegister dst, + int dst_index, + const LogicVRegister& src, + int src_index); + LogicVRegister ins_immediate(VectorFormat vform, + LogicVRegister dst, + int 
dst_index, + uint64_t imm); + LogicVRegister dup_element(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int src_index); + LogicVRegister dup_immediate(VectorFormat vform, + LogicVRegister dst, + uint64_t imm); + LogicVRegister movi(VectorFormat vform, LogicVRegister dst, uint64_t imm); + LogicVRegister mvni(VectorFormat vform, LogicVRegister dst, uint64_t imm); + LogicVRegister orr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + uint64_t imm); + LogicVRegister sshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ushl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister smax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister smin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sminmaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister smaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister addp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister addv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uaddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister saddlv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sminmaxv(VectorFormat vform, + LogicVRegister dst, + const 
LogicVRegister& src, + bool max); + LogicVRegister smaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sxtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sxtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind); + LogicVRegister tbl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister Table(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& ind, + bool zero_out_of_bounds, + const LogicVRegister* tab1, + const LogicVRegister* tab2 = NULL, + const LogicVRegister* tab3 = NULL, + const LogicVRegister* tab4 = NULL); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& ind); + LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& ind); + 
LogicVRegister tbx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& tab, + const LogicVRegister& tab2, + const LogicVRegister& tab3, + const LogicVRegister& tab4, + const LogicVRegister& ind); + LogicVRegister uaddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister saddw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister usubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister 
ssubw(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister ssubw2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister umax(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister umin(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool max); + LogicVRegister umaxp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + bool max); + LogicVRegister umaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister trn1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister trn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister zip1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister zip2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uzp1(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uzp2(VectorFormat vform, + 
LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister shl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister scvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding rounding_mode); + LogicVRegister ucvtf(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int fbits, + FPRounding rounding_mode); + LogicVRegister sshll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sshll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister shll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister shll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister ushll(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ushll2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sli(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sri(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sshr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ushr(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ssra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister usra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister srsra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister ursra(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister suqadd(VectorFormat vform, + LogicVRegister 
dst, + const LogicVRegister& src); + LogicVRegister usqadd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshlu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister abs(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister neg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister extractnarrow(VectorFormat vform, + LogicVRegister dst, + bool dstIsSigned, + const LogicVRegister& src, + bool srcIsSigned); + LogicVRegister xtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister uqxtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister sqxtun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister absdiff(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool issigned); + LogicVRegister saba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister uaba(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister shrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister shrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister rshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister rshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + 
LogicVRegister uqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister uqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrun(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrshrun2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + int shift); + LogicVRegister sqrdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true); + LogicVRegister dot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool is_signed); + LogicVRegister sdot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister udot(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister sqrdmlash(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool 
round = true, + bool sub_op = false); + LogicVRegister sqrdmlah(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true); + LogicVRegister sqrdmlsh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + bool round = true); + LogicVRegister sqdmulh(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); +#define NEON_3VREG_LOGIC_LIST(V) \ + V(addhn) \ + V(addhn2) \ + V(raddhn) \ + V(raddhn2) \ + V(subhn) \ + V(subhn2) \ + V(rsubhn) \ + V(rsubhn2) \ + V(pmull) \ + V(pmull2) \ + V(sabal) \ + V(sabal2) \ + V(uabal) \ + V(uabal2) \ + V(sabdl) \ + V(sabdl2) \ + V(uabdl) \ + V(uabdl2) \ + V(smull) \ + V(smull2) \ + V(umull) \ + V(umull2) \ + V(smlal) \ + V(smlal2) \ + V(umlal) \ + V(umlal2) \ + V(smlsl) \ + V(smlsl2) \ + V(umlsl) \ + V(umlsl2) \ + V(sqdmlal) \ + V(sqdmlal2) \ + V(sqdmlsl) \ + V(sqdmlsl2) \ + V(sqdmull) \ + V(sqdmull2) + +#define DEFINE_LOGIC_FUNC(FXN) \ + LogicVRegister FXN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); + NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC) +#undef DEFINE_LOGIC_FUNC + +#define NEON_FP3SAME_LIST(V) \ + V(fadd, FPAdd, false) \ + V(fsub, FPSub, true) \ + V(fmul, FPMul, true) \ + V(fmulx, FPMulx, true) \ + V(fdiv, FPDiv, true) \ + V(fmax, FPMax, false) \ + V(fmin, FPMin, false) \ + V(fmaxnm, FPMaxNM, false) \ + V(fminnm, FPMinNM, false) + +#define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \ + template \ + LogicVRegister FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); \ + LogicVRegister FN(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); + NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP) +#undef DECLARE_NEON_FP_VECTOR_OP + +#define NEON_FPPAIRWISE_LIST(V) \ + V(faddp, fadd, FPAdd) \ + V(fmaxp, fmax, 
FPMax) \ + V(fmaxnmp, fmaxnm, FPMaxNM) \ + V(fminp, fmin, FPMin) \ + V(fminnmp, fminnm, FPMinNM) + +#define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \ + LogicVRegister FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src1, \ + const LogicVRegister& src2); \ + LogicVRegister FNP(VectorFormat vform, \ + LogicVRegister dst, \ + const LogicVRegister& src); + NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP) +#undef DECLARE_NEON_FP_PAIR_OP + + template + LogicVRegister frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frecps(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template + LogicVRegister frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frsqrts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmla(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + template + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmls(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fnmul(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + + LogicVRegister fmlal(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmlal2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister fmlsl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const 
LogicVRegister& src2); + LogicVRegister fmlsl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + + template + LogicVRegister fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fabscmp(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2, + Condition cond); + LogicVRegister fcmp_zero(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + Condition cond); + + template + LogicVRegister fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fneg(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template + LogicVRegister frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpx(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + template + LogicVRegister fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabs_(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fabd(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src1, + const LogicVRegister& src2); + LogicVRegister frint(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + bool inexact_exception = false); + LogicVRegister fcvts(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtu(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding_mode, + int fbits = 0); + LogicVRegister fcvtl(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister 
fcvtl2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fcvtxn2(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fsqrt(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frsqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister frecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + FPRounding rounding); + LogicVRegister ursqrte(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister urecpe(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + template + struct TFPMinMaxOp { + typedef T (Simulator::*type)(T a, T b); + }; + + template + LogicVRegister fminmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src, + typename TFPMinMaxOp::type Op); + + LogicVRegister fminv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fminnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + LogicVRegister fmaxnmv(VectorFormat vform, + LogicVRegister dst, + const LogicVRegister& src); + + static const uint32_t CRC32_POLY = 0x04C11DB7; + static const uint32_t CRC32C_POLY = 0x1EDC6F41; + uint32_t Poly32Mod2(unsigned n, uint64_t data, uint32_t poly); + template + uint32_t Crc32Checksum(uint32_t acc, T val, uint32_t poly); + uint32_t Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly); + + void SysOp_W(int op, int64_t val); + + template + T FPRecipSqrtEstimate(T op); + template + T 
FPRecipEstimate(T op, FPRounding rounding); + template + R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding); + + void FPCompare(double val0, double val1, FPTrapFlags trap); + double FPRoundInt(double value, FPRounding round_mode); + double recip_sqrt_estimate(double a); + double recip_estimate(double a); + double FPRecipSqrtEstimate(double a); + double FPRecipEstimate(double a); + double FixedToDouble(int64_t src, int fbits, FPRounding round_mode); + double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode); + float FixedToFloat(int64_t src, int fbits, FPRounding round_mode); + float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode); + ::vixl::internal::SimFloat16 FixedToFloat16(int64_t src, + int fbits, + FPRounding round_mode); + ::vixl::internal::SimFloat16 UFixedToFloat16(uint64_t src, + int fbits, + FPRounding round_mode); + int16_t FPToInt16(double value, FPRounding rmode); + int32_t FPToInt32(double value, FPRounding rmode); + int64_t FPToInt64(double value, FPRounding rmode); + uint16_t FPToUInt16(double value, FPRounding rmode); + uint32_t FPToUInt32(double value, FPRounding rmode); + uint64_t FPToUInt64(double value, FPRounding rmode); + int32_t FPToFixedJS(double value); + + template + T FPAdd(T op1, T op2); + + template + T FPNeg(T op); + + template + T FPDiv(T op1, T op2); + + template + T FPMax(T a, T b); + + template + T FPMaxNM(T a, T b); + + template + T FPMin(T a, T b); + + template + T FPMinNM(T a, T b); + + template + T FPMul(T op1, T op2); + + template + T FPMulx(T op1, T op2); + + template + T FPMulAdd(T a, T op1, T op2); + + template + T FPSqrt(T op); + + template + T FPSub(T op1, T op2); + + template + T FPRecipStepFused(T op1, T op2); + + template + T FPRSqrtStepFused(T op1, T op2); + + // This doesn't do anything at the moment. We'll need it if we want support + // for cumulative exception bits or floating-point exceptions. 
+ void FPProcessException() {} + + bool FPProcessNaNs(const Instruction* instr); + + // Pseudo Printf instruction + void DoPrintf(const Instruction* instr); + + // Pseudo-instructions to configure CPU features dynamically. + void DoConfigureCPUFeatures(const Instruction* instr); + + void DoSaveCPUFeatures(const Instruction* instr); + void DoRestoreCPUFeatures(const Instruction* instr); + +// Simulate a runtime call. +#ifndef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT + VIXL_NO_RETURN_IN_DEBUG_MODE +#endif + void DoRuntimeCall(const Instruction* instr); + + // Processor state --------------------------------------- + + // Simulated monitors for exclusive access instructions. + SimExclusiveLocalMonitor local_monitor_; + SimExclusiveGlobalMonitor global_monitor_; + + // Output stream. + FILE* stream_; + PrintDisassembler* print_disasm_; + + // Instruction statistics instrumentation. + Instrument* instrumentation_; + + // General purpose registers. Register 31 is the stack pointer. + SimRegister registers_[kNumberOfRegisters]; + + // Vector registers + SimVRegister vregisters_[kNumberOfVRegisters]; + + // Program Status Register. + // bits[31, 27]: Condition flags N, Z, C, and V. + // (Negative, Zero, Carry, Overflow) + SimSystemRegister nzcv_; + + // Floating-Point Control Register + SimSystemRegister fpcr_; + + // Only a subset of FPCR features are supported by the simulator. This helper + // checks that the FPCR settings are supported. + // + // This is checked when floating-point instructions are executed, not when + // FPCR is set. This allows generated code to modify FPCR for external + // functions, or to save and restore it when entering and leaving generated + // code. + void AssertSupportedFPCR() { + // No flush-to-zero support. + VIXL_ASSERT(ReadFpcr().GetFZ() == 0); + // Ties-to-even rounding only. 
+ VIXL_ASSERT(ReadFpcr().GetRMode() == FPTieEven); + + // The simulator does not support half-precision operations so + // GetFpcr().AHP() is irrelevant, and is not checked here. + } + + static int CalcNFlag(uint64_t result, unsigned reg_size) { + return (result >> (reg_size - 1)) & 1; + } + + static int CalcZFlag(uint64_t result) { return (result == 0) ? 1 : 0; } + + static const uint32_t kConditionFlagsMask = 0xf0000000; + + // Stack + byte* stack_; + static const int stack_protection_size_ = 256; + // 2 KB stack. + static const int stack_size_ = 2 * 1024 + 2 * stack_protection_size_; + byte* stack_limit_; + + Decoder* decoder_; + // Indicates if the pc has been modified by the instruction and should not be + // automatically incremented. + bool pc_modified_; + const Instruction* pc_; + + // Branch type register, used for branch target identification. + BType btype_; + + // Next value of branch type register after the current instruction has been + // decoded. + BType next_btype_; + + // Global flag for enabling guarded pages. + // TODO: implement guarding at page granularity, rather than globally. + bool guard_pages_; + + static const char* xreg_names[]; + static const char* wreg_names[]; + static const char* hreg_names[]; + static const char* sreg_names[]; + static const char* dreg_names[]; + static const char* vreg_names[]; + + private: + static const PACKey kPACKeyIA; + static const PACKey kPACKeyIB; + static const PACKey kPACKeyDA; + static const PACKey kPACKeyDB; + static const PACKey kPACKeyGA; + + template + static T FPDefaultNaN(); + + // Standard NaN processing. + template + T FPProcessNaN(T op) { + VIXL_ASSERT(IsNaN(op)); + if (IsSignallingNaN(op)) { + FPProcessException(); + } + return (ReadDN() == kUseDefaultNaN) ? 
FPDefaultNaN() : ToQuietNaN(op); + } + + template + T FPProcessNaNs(T op1, T op2) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (IsNaN(op1)) { + VIXL_ASSERT(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (IsNaN(op2)) { + VIXL_ASSERT(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else { + return 0.0; + } + } + + template + T FPProcessNaNs3(T op1, T op2, T op3) { + if (IsSignallingNaN(op1)) { + return FPProcessNaN(op1); + } else if (IsSignallingNaN(op2)) { + return FPProcessNaN(op2); + } else if (IsSignallingNaN(op3)) { + return FPProcessNaN(op3); + } else if (IsNaN(op1)) { + VIXL_ASSERT(IsQuietNaN(op1)); + return FPProcessNaN(op1); + } else if (IsNaN(op2)) { + VIXL_ASSERT(IsQuietNaN(op2)); + return FPProcessNaN(op2); + } else if (IsNaN(op3)) { + VIXL_ASSERT(IsQuietNaN(op3)); + return FPProcessNaN(op3); + } else { + return 0.0; + } + } + + bool coloured_trace_; + + // A set of TraceParameters flags. + int trace_parameters_; + + // Indicates whether the instruction instrumentation is active. + bool instruction_stats_; + + // Indicates whether the exclusive-access warning has been printed. + bool print_exclusive_access_warning_; + void PrintExclusiveAccessWarning(); + + CPUFeaturesAuditor cpu_features_auditor_; + std::vector saved_cpu_features_; +}; + +#if defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT) && __cplusplus < 201402L +// Base case of the recursive template used to emulate C++14 +// `std::index_sequence`. 
+template +struct Simulator::emulated_make_index_sequence_helper<0, I...> + : Simulator::emulated_index_sequence {}; +#endif + +} // namespace aarch64 +} // namespace vixl + +//#endif // VIXL_INCLUDE_SIMULATOR_AARCH64 + +#endif // VIXL_AARCH64_SIMULATOR_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-constants-aarch64.h b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-constants-aarch64.h new file mode 100644 index 00000000..6631043d --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/aarch64/simulator-constants-aarch64.h @@ -0,0 +1,192 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_ +#define VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_ + +#include "instructions-aarch64.h" + +namespace vixl { +namespace aarch64 { + +// Debug instructions. +// +// VIXL's macro-assembler and simulator support a few pseudo instructions to +// make debugging easier. These pseudo instructions do not exist on real +// hardware. +// +// TODO: Also consider allowing these pseudo-instructions to be disabled in the +// simulator, so that users can check that the input is a valid native code. +// (This isn't possible in all cases. Printf won't work, for example.) +// +// Each debug pseudo instruction is represented by a HLT instruction. The HLT +// immediate field is used to identify the type of debug pseudo instruction. + +enum DebugHltOpcode { + kUnreachableOpcode = 0xdeb0, + kPrintfOpcode, + kTraceOpcode, + kLogOpcode, + kRuntimeCallOpcode, + kSetCPUFeaturesOpcode, + kEnableCPUFeaturesOpcode, + kDisableCPUFeaturesOpcode, + kSaveCPUFeaturesOpcode, + kRestoreCPUFeaturesOpcode, + // Aliases. + kDebugHltFirstOpcode = kUnreachableOpcode, + kDebugHltLastOpcode = kLogOpcode +}; +VIXL_DEPRECATED("DebugHltOpcode", typedef DebugHltOpcode DebugHltOpcodes); + +// Each pseudo instruction uses a custom encoding for additional arguments, as +// described below. + +// Unreachable - kUnreachableOpcode +// +// Instruction which should never be executed. 
This is used as a guard in parts +// of the code that should not be reachable, such as in data encoded inline in +// the instructions. + +// Printf - kPrintfOpcode +// - arg_count: The number of arguments. +// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields. +// +// Simulate a call to printf. +// +// Floating-point and integer arguments are passed in separate sets of registers +// in AAPCS64 (even for varargs functions), so it is not possible to determine +// the type of each argument without some information about the values that were +// passed in. This information could be retrieved from the printf format string, +// but the format string is not trivial to parse so we encode the relevant +// information with the HLT instruction. +// +// Also, the following registers are populated (as if for a native Aarch64 +// call): +// x0: The format string +// x1-x7: Optional arguments, if type == CPURegister::kRegister +// d0-d7: Optional arguments, if type == CPURegister::kFPRegister +const unsigned kPrintfArgCountOffset = 1 * kInstructionSize; +const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize; +const unsigned kPrintfLength = 3 * kInstructionSize; + +const unsigned kPrintfMaxArgCount = 4; + +// The argument pattern is a set of two-bit-fields, each with one of the +// following values: +enum PrintfArgPattern { + kPrintfArgW = 1, + kPrintfArgX = 2, + // There is no kPrintfArgS because floats are always converted to doubles in C + // varargs calls. + kPrintfArgD = 3 +}; +static const unsigned kPrintfArgPatternBits = 2; + +// Trace - kTraceOpcode +// - parameter: TraceParameter stored as a uint32_t +// - command: TraceCommand stored as a uint32_t +// +// Allow for trace management in the generated code. This enables or disables +// automatic tracing of the specified information for every simulated +// instruction. 
+const unsigned kTraceParamsOffset = 1 * kInstructionSize; +const unsigned kTraceCommandOffset = 2 * kInstructionSize; +const unsigned kTraceLength = 3 * kInstructionSize; + +// Trace parameters. +enum TraceParameters { + LOG_DISASM = 1 << 0, // Log disassembly. + LOG_REGS = 1 << 1, // Log general purpose registers. + LOG_VREGS = 1 << 2, // Log NEON and floating-point registers. + LOG_SYSREGS = 1 << 3, // Log the flags and system registers. + LOG_WRITE = 1 << 4, // Log writes to memory. + LOG_BRANCH = 1 << 5, // Log taken branches. + + LOG_NONE = 0, + LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS, + LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE | LOG_BRANCH +}; + +// Trace commands. +enum TraceCommand { TRACE_ENABLE = 1, TRACE_DISABLE = 2 }; + +// Log - kLogOpcode +// - parameter: TraceParameter stored as a uint32_t +// +// Print the specified information once. This mechanism is separate from Trace. +// In particular, _all_ of the specified registers are printed, rather than just +// the registers that the instruction writes. +// +// Any combination of the TraceParameters values can be used, except that +// LOG_DISASM is not supported for Log. +const unsigned kLogParamsOffset = 1 * kInstructionSize; +const unsigned kLogLength = 2 * kInstructionSize; + +// Runtime call simulation - kRuntimeCallOpcode +enum RuntimeCallType { kCallRuntime, kTailCallRuntime }; + +const unsigned kRuntimeCallWrapperOffset = 1 * kInstructionSize; +// The size of a pointer on host. 
+const unsigned kRuntimeCallAddressSize = sizeof(uintptr_t); +const unsigned kRuntimeCallFunctionOffset = + kRuntimeCallWrapperOffset + kRuntimeCallAddressSize; +const unsigned kRuntimeCallTypeOffset = + kRuntimeCallFunctionOffset + kRuntimeCallAddressSize; +const unsigned kRuntimeCallLength = kRuntimeCallTypeOffset + sizeof(uint32_t); + +// Enable or disable CPU features - kSetCPUFeaturesOpcode +// - kEnableCPUFeaturesOpcode +// - kDisableCPUFeaturesOpcode +// - parameter[...]: A list of `CPUFeatures::Feature`s, encoded as +// ConfigureCPUFeaturesElementType and terminated with CPUFeatures::kNone. +// - [Padding to align to kInstructionSize.] +// +// 'Set' completely overwrites the existing CPU features. +// 'Enable' and 'Disable' update the existing CPU features. +// +// These mechanisms allows users to strictly check the use of CPU features in +// different regions of code. +// +// These have no effect on the set of 'seen' features (as reported by +// CPUFeaturesAuditor::HasSeen(...)). +typedef uint8_t ConfigureCPUFeaturesElementType; +const unsigned kConfigureCPUFeaturesListOffset = 1 * kInstructionSize; + +// Save or restore CPU features - kSaveCPUFeaturesOpcode +// - kRestoreCPUFeaturesOpcode +// +// These mechanisms provide a stack-like mechanism for preserving the CPU +// features, or restoring the last-preserved features. These pseudo-instructions +// take no arguments. +// +// These have no effect on the set of 'seen' features (as reported by +// CPUFeaturesAuditor::HasSeen(...)). + +} // namespace aarch64 +} // namespace vixl + +#endif // VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/assembler-base-vixl.h b/module/src/main/cpp/whale/src/assembler/vixl/assembler-base-vixl.h new file mode 100644 index 00000000..ee54dcbc --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/assembler-base-vixl.h @@ -0,0 +1,101 @@ +// Copyright 2016, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef VIXL_ASSEMBLER_BASE_H +#define VIXL_ASSEMBLER_BASE_H + +#include "code-buffer-vixl.h" + +namespace vixl { + +class CodeBufferCheckScope; + +namespace internal { + +class AssemblerBase { + public: + AssemblerBase() : allow_assembler_(false) {} + explicit AssemblerBase(size_t capacity) + : buffer_(capacity), allow_assembler_(false) {} + AssemblerBase(byte* buffer, size_t capacity) + : buffer_(buffer, capacity), allow_assembler_(false) {} + + virtual ~AssemblerBase() {} + + // Finalize a code buffer of generated instructions. This function must be + // called before executing or copying code from the buffer. + void FinalizeCode() { GetBuffer()->SetClean(); } + + ptrdiff_t GetCursorOffset() const { return GetBuffer().GetCursorOffset(); } + + // Return the address of the cursor. + template + T GetCursorAddress() const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetBuffer().GetOffsetAddress(GetCursorOffset()); + } + + size_t GetSizeOfCodeGenerated() const { return GetCursorOffset(); } + + // Accessors. + CodeBuffer* GetBuffer() { return &buffer_; } + const CodeBuffer& GetBuffer() const { return buffer_; } + bool AllowAssembler() const { return allow_assembler_; } + + protected: + void SetAllowAssembler(bool allow) { allow_assembler_ = allow; } + + // CodeBufferCheckScope must be able to temporarily allow the assembler. + friend class vixl::CodeBufferCheckScope; + + // Buffer where the code is emitted. + CodeBuffer buffer_; + + private: + bool allow_assembler_; + + public: + // Deprecated public interface. + + // Return the address of an offset in the buffer. + template + VIXL_DEPRECATED("GetBuffer().GetOffsetAddress(offset)", + T GetOffsetAddress(ptrdiff_t offset) const) { + return GetBuffer().GetOffsetAddress(offset); + } + + // Return the address of the start of the buffer. 
+ template + VIXL_DEPRECATED("GetBuffer().GetStartAddress()", + T GetStartAddress() const) { + return GetBuffer().GetOffsetAddress(0); + } +}; + +} // namespace internal +} // namespace vixl + +#endif // VIXL_ASSEMBLER_BASE_H diff --git a/module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.cc b/module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.cc new file mode 100644 index 00000000..6d2469eb --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.cc @@ -0,0 +1,185 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +extern "C" { +#include +} + +#include "code-buffer-vixl.h" +#include "utils-vixl.h" + +namespace vixl { + + + CodeBuffer::CodeBuffer(size_t capacity) + : buffer_(NULL), + managed_(true), + cursor_(NULL), + dirty_(false), + capacity_(capacity) { + if (capacity_ == 0) { + return; + } +#ifdef VIXL_CODE_BUFFER_MALLOC + buffer_ = reinterpret_cast(malloc(capacity_)); +#elif defined(VIXL_CODE_BUFFER_MMAP) + buffer_ = reinterpret_cast(mmap(NULL, + capacity, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + -1, + 0)); +#else +#error Unknown code buffer allocator. +#endif + VIXL_CHECK(buffer_ != NULL); + // Aarch64 instructions must be word aligned, we assert the default allocator + // always returns word align memory. + VIXL_ASSERT(IsWordAligned(buffer_)); + + cursor_ = buffer_; + } + + + CodeBuffer::CodeBuffer(byte *buffer, size_t capacity) + : buffer_(reinterpret_cast(buffer)), + managed_(false), + cursor_(reinterpret_cast(buffer)), + dirty_(false), + capacity_(capacity) { + VIXL_ASSERT(buffer_ != NULL); + } + + + CodeBuffer::~CodeBuffer() { + VIXL_ASSERT(!IsDirty()); + if (managed_) { +#ifdef VIXL_CODE_BUFFER_MALLOC + free(buffer_); +#elif defined(VIXL_CODE_BUFFER_MMAP) + munmap(buffer_, capacity_); +#else +#error Unknown code buffer allocator. 
+#endif + } + } + + +#ifdef VIXL_CODE_BUFFER_MMAP + void CodeBuffer::SetExecutable() { + int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_EXEC); + VIXL_CHECK(ret == 0); + } +#endif + + +#ifdef VIXL_CODE_BUFFER_MMAP + void CodeBuffer::SetWritable() { + int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_WRITE); + VIXL_CHECK(ret == 0); + } +#endif + +#if defined(__ANDROID_API__) && __ANDROID_API__ < 21 + char * + stpcpy(char *to, const char *from) { + for (; (*to = *from) != '\0'; ++from, ++to); + return (to); + } +#endif + + void CodeBuffer::EmitString(const char *string) { + VIXL_ASSERT(HasSpaceFor(strlen(string) + 1)); + char *dst = reinterpret_cast(cursor_); + dirty_ = true; + char *null_char = stpcpy(dst, string); + cursor_ = reinterpret_cast(null_char) + 1; + } + + + void CodeBuffer::EmitData(const void *data, size_t size) { + VIXL_ASSERT(HasSpaceFor(size)); + dirty_ = true; + memcpy(cursor_, data, size); + cursor_ = cursor_ + size; + } + + + void CodeBuffer::UpdateData(size_t offset, const void *data, size_t size) { + dirty_ = true; + byte *dst = buffer_ + offset; + VIXL_ASSERT(dst + size <= cursor_); + memcpy(dst, data, size); + } + + + void CodeBuffer::Align() { + byte *end = AlignUp(cursor_, 4); + const size_t padding_size = end - cursor_; + VIXL_ASSERT(padding_size <= 4); + EmitZeroedBytes(static_cast(padding_size)); + } + + void CodeBuffer::EmitZeroedBytes(int n) { + EnsureSpaceFor(n); + dirty_ = true; + memset(cursor_, 0, n); + cursor_ += n; + } + + void CodeBuffer::Reset() { +#ifdef VIXL_DEBUG + if (managed_) { + // Fill with zeros (there is no useful value common to A32 and T32). 
+ memset(buffer_, 0, capacity_); + } +#endif + cursor_ = buffer_; + SetClean(); + } + + + void CodeBuffer::Grow(size_t new_capacity) { + VIXL_ASSERT(managed_); + VIXL_ASSERT(new_capacity > capacity_); + ptrdiff_t cursor_offset = GetCursorOffset(); +#ifdef VIXL_CODE_BUFFER_MALLOC + buffer_ = static_cast(realloc(buffer_, new_capacity)); + VIXL_CHECK(buffer_ != NULL); +#elif defined(VIXL_CODE_BUFFER_MMAP) + buffer_ = static_cast( + mremap(buffer_, capacity_, new_capacity, MREMAP_MAYMOVE)); + VIXL_CHECK(buffer_ != MAP_FAILED); +#else +#error Unknown code buffer allocator. +#endif + + cursor_ = buffer_ + cursor_offset; + capacity_ = new_capacity; + } + + +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.h b/module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.h new file mode 100644 index 00000000..d0d815e4 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/code-buffer-vixl.h @@ -0,0 +1,191 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_CODE_BUFFER_H +#define VIXL_CODE_BUFFER_H + +#include + +#include "globals-vixl.h" +#include "utils-vixl.h" + +namespace vixl { + +class CodeBuffer { + public: + static const size_t kDefaultCapacity = 4 * KBytes; + + explicit CodeBuffer(size_t capacity = kDefaultCapacity); + CodeBuffer(byte* buffer, size_t capacity); + ~CodeBuffer(); + + void Reset(); + +#ifdef VIXL_CODE_BUFFER_MMAP + void SetExecutable(); + void SetWritable(); +#else + // These require page-aligned memory blocks, which we can only guarantee with + // mmap. 
+ VIXL_NO_RETURN_IN_DEBUG_MODE void SetExecutable() { VIXL_UNIMPLEMENTED(); } + VIXL_NO_RETURN_IN_DEBUG_MODE void SetWritable() { VIXL_UNIMPLEMENTED(); } +#endif + + ptrdiff_t GetOffsetFrom(ptrdiff_t offset) const { + ptrdiff_t cursor_offset = cursor_ - buffer_; + VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset)); + return cursor_offset - offset; + } + VIXL_DEPRECATED("GetOffsetFrom", + ptrdiff_t OffsetFrom(ptrdiff_t offset) const) { + return GetOffsetFrom(offset); + } + + ptrdiff_t GetCursorOffset() const { return GetOffsetFrom(0); } + VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) { + return GetCursorOffset(); + } + + void Rewind(ptrdiff_t offset) { + byte* rewound_cursor = buffer_ + offset; + VIXL_ASSERT((buffer_ <= rewound_cursor) && (rewound_cursor <= cursor_)); + cursor_ = rewound_cursor; + } + + template + T GetOffsetAddress(ptrdiff_t offset) const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_))); + return reinterpret_cast(buffer_ + offset); + } + + // Return the address of the start or end of the emitted code. + template + T GetStartAddress() const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetOffsetAddress(0); + } + template + T GetEndAddress() const { + VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t)); + return GetOffsetAddress(GetSizeInBytes()); + } + + size_t GetRemainingBytes() const { + VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_))); + return (buffer_ + capacity_) - cursor_; + } + VIXL_DEPRECATED("GetRemainingBytes", size_t RemainingBytes() const) { + return GetRemainingBytes(); + } + + size_t GetSizeInBytes() const { + VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_))); + return cursor_ - buffer_; + } + + // A code buffer can emit: + // * 8, 16, 32 or 64-bit data: constant. + // * 16 or 32-bit data: instruction. + // * string: debug info. 
+ void Emit8(uint8_t data) { Emit(data); } + + void Emit16(uint16_t data) { Emit(data); } + + void Emit32(uint32_t data) { Emit(data); } + + void Emit64(uint64_t data) { Emit(data); } + + void EmitString(const char* string); + + void EmitData(const void* data, size_t size); + + template + void Emit(T value) { + VIXL_ASSERT(HasSpaceFor(sizeof(value))); + dirty_ = true; + memcpy(cursor_, &value, sizeof(value)); + cursor_ += sizeof(value); + } + + void UpdateData(size_t offset, const void* data, size_t size); + + // Align to 32bit. + void Align(); + + // Ensure there is enough space for and emit 'n' zero bytes. + void EmitZeroedBytes(int n); + + bool Is16bitAligned() const { return IsAligned<2>(cursor_); } + + bool Is32bitAligned() const { return IsAligned<4>(cursor_); } + + size_t GetCapacity() const { return capacity_; } + VIXL_DEPRECATED("GetCapacity", size_t capacity() const) { + return GetCapacity(); + } + + bool IsManaged() const { return managed_; } + + void Grow(size_t new_capacity); + + bool IsDirty() const { return dirty_; } + + void SetClean() { dirty_ = false; } + + bool HasSpaceFor(size_t amount) const { + return GetRemainingBytes() >= amount; + } + + void EnsureSpaceFor(size_t amount, bool* has_grown) { + bool is_full = !HasSpaceFor(amount); + if (is_full) Grow(capacity_ * 2 + amount); + VIXL_ASSERT(has_grown != NULL); + *has_grown = is_full; + } + void EnsureSpaceFor(size_t amount) { + bool dummy; + EnsureSpaceFor(amount, &dummy); + } + + private: + // Backing store of the buffer. + byte* buffer_; + // If true the backing store is allocated and deallocated by the buffer. The + // backing store can then grow on demand. If false the backing store is + // provided by the user and cannot be resized internally. + bool managed_; + // Pointer to the next location to be written. + byte* cursor_; + // True if there has been any write since the buffer was created or cleaned. + bool dirty_; + // Capacity in bytes of the backing store. 
+ size_t capacity_; +}; + +} // namespace vixl + +#endif // VIXL_CODE_BUFFER_H diff --git a/module/src/main/cpp/whale/src/assembler/vixl/code-generation-scopes-vixl.h b/module/src/main/cpp/whale/src/assembler/vixl/code-generation-scopes-vixl.h new file mode 100644 index 00000000..b7ea2d92 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/code-generation-scopes-vixl.h @@ -0,0 +1,322 @@ +// Copyright 2016, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +#ifndef VIXL_CODE_GENERATION_SCOPES_H_ +#define VIXL_CODE_GENERATION_SCOPES_H_ + + +#include "assembler-base-vixl.h" +#include "macro-assembler-interface.h" + + +namespace vixl { + +// This scope will: +// - Allow code emission from the specified `Assembler`. +// - Optionally reserve space in the `CodeBuffer` (if it is managed by VIXL). +// - Optionally, on destruction, check the size of the generated code. +// (The size can be either exact or a maximum size.) +class CodeBufferCheckScope { + public: + // Tell whether or not the scope needs to ensure the associated CodeBuffer + // has enough space for the requested size. + enum BufferSpacePolicy { + kReserveBufferSpace, + kDontReserveBufferSpace, + + // Deprecated, but kept for backward compatibility. + kCheck = kReserveBufferSpace, + kNoCheck = kDontReserveBufferSpace + }; + + // Tell whether or not the scope should assert the amount of code emitted + // within the scope is consistent with the requested amount. + enum SizePolicy { + kNoAssert, // Do not check the size of the code emitted. + kExactSize, // The code emitted must be exactly size bytes. + kMaximumSize // The code emitted must be at most size bytes. + }; + + // This constructor implicitly calls `Open` to initialise the scope + // (`assembler` must not be `NULL`), so it is ready to use immediately after + // it has been constructed. + CodeBufferCheckScope(internal::AssemblerBase* assembler, + size_t size, + BufferSpacePolicy check_policy = kReserveBufferSpace, + SizePolicy size_policy = kMaximumSize) + : assembler_(NULL), initialised_(false) { + Open(assembler, size, check_policy, size_policy); + } + + // This constructor does not implicitly initialise the scope. Instead, the + // user is required to explicitly call the `Open` function before using the + // scope. + CodeBufferCheckScope() : assembler_(NULL), initialised_(false) { + // Nothing to do. 
+ } + + virtual ~CodeBufferCheckScope() { Close(); } + + // This function performs the actual initialisation work. + void Open(internal::AssemblerBase* assembler, + size_t size, + BufferSpacePolicy check_policy = kReserveBufferSpace, + SizePolicy size_policy = kMaximumSize) { + VIXL_ASSERT(!initialised_); + VIXL_ASSERT(assembler != NULL); + assembler_ = assembler; + if (check_policy == kReserveBufferSpace) { + assembler->GetBuffer()->EnsureSpaceFor(size); + } +#ifdef VIXL_DEBUG + limit_ = assembler_->GetSizeOfCodeGenerated() + size; + assert_policy_ = size_policy; + previous_allow_assembler_ = assembler_->AllowAssembler(); + assembler_->SetAllowAssembler(true); +#else + USE(size_policy); +#endif + initialised_ = true; + } + + // This function performs the cleaning-up work. It must succeed even if the + // scope has not been opened. It is safe to call multiple times. + void Close() { +#ifdef VIXL_DEBUG + if (!initialised_) { + return; + } + assembler_->SetAllowAssembler(previous_allow_assembler_); + switch (assert_policy_) { + case kNoAssert: + break; + case kExactSize: + VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() == limit_); + break; + case kMaximumSize: + VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() <= limit_); + break; + default: + VIXL_UNREACHABLE(); + } +#endif + initialised_ = false; + } + + protected: + internal::AssemblerBase* assembler_; + SizePolicy assert_policy_; + size_t limit_; + bool previous_allow_assembler_; + bool initialised_; +}; + + +// This scope will: +// - Do the same as `CodeBufferCheckSCope`, but: +// - If managed by VIXL, always reserve space in the `CodeBuffer`. +// - Always check the size (exact or maximum) of the generated code on +// destruction. +// - Emit pools if the specified size would push them out of range. +// - Block pools emission for the duration of the scope. +// This scope allows the `Assembler` and `MacroAssembler` to be freely and +// safely mixed for its duration. 
+class EmissionCheckScope : public CodeBufferCheckScope { + public: + // This constructor implicitly calls `Open` (when `masm` is not `NULL`) to + // initialise the scope, so it is ready to use immediately after it has been + // constructed. + EmissionCheckScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kMaximumSize) { + Open(masm, size, size_policy); + } + + // This constructor does not implicitly initialise the scope. Instead, the + // user is required to explicitly call the `Open` function before using the + // scope. + EmissionCheckScope() {} + + virtual ~EmissionCheckScope() { Close(); } + + enum PoolPolicy { + // Do not forbid pool emission inside the scope. Pools will not be emitted + // on `Open` either. + kIgnorePools, + // Force pools to be generated on `Open` if necessary and block their + // emission inside the scope. + kBlockPools, + // Deprecated, but kept for backward compatibility. + kCheckPools = kBlockPools + }; + + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kMaximumSize) { + Open(masm, size, size_policy, kBlockPools); + } + + void Close() { + if (!initialised_) { + return; + } + if (masm_ == NULL) { + // Nothing to do. + return; + } + // Perform the opposite of `Open`, which is: + // - Check the code generation limit was not exceeded. + // - Release the pools. + CodeBufferCheckScope::Close(); + if (pool_policy_ == kBlockPools) { + masm_->ReleasePools(); + } + VIXL_ASSERT(!initialised_); + } + + protected: + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy, + PoolPolicy pool_policy) { + if (masm == NULL) { + // Nothing to do. + // We may reach this point in a context of conditional code generation. + // See `aarch64::MacroAssembler::MoveImmediateHelper()` for an example. 
+ return; + } + masm_ = masm; + pool_policy_ = pool_policy; + if (pool_policy_ == kBlockPools) { + // To avoid duplicating the work to check that enough space is available + // in the buffer, do not use the more generic `EnsureEmitFor()`. It is + // done below when opening `CodeBufferCheckScope`. + masm->EnsureEmitPoolsFor(size); + masm->BlockPools(); + } + // The buffer should be checked *after* we emit the pools. + CodeBufferCheckScope::Open(masm->AsAssemblerBase(), + size, + kReserveBufferSpace, + size_policy); + VIXL_ASSERT(initialised_); + } + + // This constructor should only be used from code that is *currently + // generating* the pools, to avoid an infinite loop. + EmissionCheckScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy, + PoolPolicy pool_policy) { + Open(masm, size, size_policy, pool_policy); + } + + MacroAssemblerInterface* masm_; + PoolPolicy pool_policy_; +}; + +// Use this scope when you need a one-to-one mapping between methods and +// instructions. This scope will: +// - Do the same as `EmissionCheckScope`. +// - Block access to the MacroAssemblerInterface (using run-time assertions). +class ExactAssemblyScope : public EmissionCheckScope { + public: + // This constructor implicitly calls `Open` (when `masm` is not `NULL`) to + // initialise the scope, so it is ready to use immediately after it has been + // constructed. + ExactAssemblyScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kExactSize) { + Open(masm, size, size_policy); + } + + // This constructor does not implicitly initialise the scope. Instead, the + // user is required to explicitly call the `Open` function before using the + // scope. 
+ ExactAssemblyScope() {} + + virtual ~ExactAssemblyScope() { Close(); } + + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy = kExactSize) { + Open(masm, size, size_policy, kBlockPools); + } + + void Close() { + if (!initialised_) { + return; + } + if (masm_ == NULL) { + // Nothing to do. + return; + } +#ifdef VIXL_DEBUG + masm_->SetAllowMacroInstructions(previous_allow_macro_assembler_); +#else + USE(previous_allow_macro_assembler_); +#endif + EmissionCheckScope::Close(); + } + + protected: + // This protected constructor allows overriding the pool policy. It is + // available to allow this scope to be used in code that handles generation + // of pools. + ExactAssemblyScope(MacroAssemblerInterface* masm, + size_t size, + SizePolicy assert_policy, + PoolPolicy pool_policy) { + Open(masm, size, assert_policy, pool_policy); + } + + void Open(MacroAssemblerInterface* masm, + size_t size, + SizePolicy size_policy, + PoolPolicy pool_policy) { + VIXL_ASSERT(size_policy != kNoAssert); + if (masm == NULL) { + // Nothing to do. + return; + } + // Rely on EmissionCheckScope::Open to initialise `masm_` and + // `pool_policy_`. + EmissionCheckScope::Open(masm, size, size_policy, pool_policy); +#ifdef VIXL_DEBUG + previous_allow_macro_assembler_ = masm->AllowMacroInstructions(); + masm->SetAllowMacroInstructions(false); +#endif + } + + private: + bool previous_allow_macro_assembler_; +}; + + +} // namespace vixl + +#endif // VIXL_CODE_GENERATION_SCOPES_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.cc b/module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.cc new file mode 100644 index 00000000..ae182c7d --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.cc @@ -0,0 +1,144 @@ +// Copyright 2015, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "compiler-intrinsics-vixl.h" + +namespace vixl { + + +int CountLeadingSignBitsFallBack(int64_t value, int width) { + VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); + if (value >= 0) { + return CountLeadingZeros(value, width) - 1; + } else { + return CountLeadingZeros(~value, width) - 1; + } +} + + +int CountLeadingZerosFallBack(uint64_t value, int width) { + VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); + if (value == 0) { + return width; + } + int count = 0; + value = value << (64 - width); + if ((value & UINT64_C(0xffffffff00000000)) == 0) { + count += 32; + value = value << 32; + } + if ((value & UINT64_C(0xffff000000000000)) == 0) { + count += 16; + value = value << 16; + } + if ((value & UINT64_C(0xff00000000000000)) == 0) { + count += 8; + value = value << 8; + } + if ((value & UINT64_C(0xf000000000000000)) == 0) { + count += 4; + value = value << 4; + } + if ((value & UINT64_C(0xc000000000000000)) == 0) { + count += 2; + value = value << 2; + } + if ((value & UINT64_C(0x8000000000000000)) == 0) { + count += 1; + } + count += (value == 0); + return count; +} + + +int CountSetBitsFallBack(uint64_t value, int width) { + VIXL_ASSERT(IsPowerOf2(width) && (width <= 64)); + + // Mask out unused bits to ensure that they are not counted. + value &= (UINT64_C(0xffffffffffffffff) >> (64 - width)); + + // Add up the set bits. + // The algorithm works by adding pairs of bit fields together iteratively, + // where the size of each bit field doubles each time. 
+  // An example for an 8-bit value:
+  //  Bits:  h  g  f  e  d  c  b  a
+  //         \ |  \ |  \ |  \ |
+  // value = h+g  f+e  d+c  b+a
+  //          \    |   \    |
+  // value = h+g+f+e  d+c+b+a
+  //              \       |
+  // value = h+g+f+e+d+c+b+a
+  const uint64_t kMasks[] = {
+      UINT64_C(0x5555555555555555),
+      UINT64_C(0x3333333333333333),
+      UINT64_C(0x0f0f0f0f0f0f0f0f),
+      UINT64_C(0x00ff00ff00ff00ff),
+      UINT64_C(0x0000ffff0000ffff),
+      UINT64_C(0x00000000ffffffff),
+  };
+
+  for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
+    int shift = 1 << i;
+    value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
+  }
+
+  return static_cast<int>(value);
+}
+
+
+int CountTrailingZerosFallBack(uint64_t value, int width) {
+  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+  int count = 0;
+  value = value << (64 - width);
+  if ((value & UINT64_C(0xffffffff)) == 0) {
+    count += 32;
+    value = value >> 32;
+  }
+  if ((value & 0xffff) == 0) {
+    count += 16;
+    value = value >> 16;
+  }
+  if ((value & 0xff) == 0) {
+    count += 8;
+    value = value >> 8;
+  }
+  if ((value & 0xf) == 0) {
+    count += 4;
+    value = value >> 4;
+  }
+  if ((value & 0x3) == 0) {
+    count += 2;
+    value = value >> 2;
+  }
+  if ((value & 0x1) == 0) {
+    count += 1;
+  }
+  count += (value == 0);
+  return count - (64 - width);
+}
+
+
+}  // namespace vixl
diff --git a/module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.h b/module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.h
new file mode 100644
index 00000000..b27f94eb
--- /dev/null
+++ b/module/src/main/cpp/whale/src/assembler/vixl/compiler-intrinsics-vixl.h
@@ -0,0 +1,160 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#ifndef VIXL_COMPILER_INTRINSICS_H +#define VIXL_COMPILER_INTRINSICS_H + +#include "globals-vixl.h" + +namespace vixl { + +// Helper to check whether the version of GCC used is greater than the specified +// requirement. 
+#define MAJOR 1000000 +#define MINOR 1000 +#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ + ((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR) + __GNUC_PATCHLEVEL__) >= \ + ((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel))) +#elif defined(__GNUC__) && defined(__GNUC_MINOR__) +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \ + ((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR)) >= \ + ((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel))) +#else +#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0 +#endif + + +#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS) + +// clang-format off +#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb)) +#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz)) +#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz)) +#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs)) +#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount)) +// clang-format on + +#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS) +// The documentation for these builtins is available at: +// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html + +// clang-format off +# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0)) +# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0)) +# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0)) +// clang-format on + +#else +// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually +// implemented C++ methods. 
+
+// clang-format off
+#define COMPILER_HAS_BUILTIN_BSWAP false
+#define COMPILER_HAS_BUILTIN_CLRSB false
+#define COMPILER_HAS_BUILTIN_CLZ false
+#define COMPILER_HAS_BUILTIN_CTZ false
+#define COMPILER_HAS_BUILTIN_FFS false
+#define COMPILER_HAS_BUILTIN_POPCOUNT false
+// clang-format on
+
+#endif
+
+
+template <typename V>
+inline bool IsPowerOf2(V value) {
+  return (value != 0) && ((value & (value - 1)) == 0);
+}
+
+
+// Declaration of fallback functions.
+int CountLeadingSignBitsFallBack(int64_t value, int width);
+int CountLeadingZerosFallBack(uint64_t value, int width);
+int CountSetBitsFallBack(uint64_t value, int width);
+int CountTrailingZerosFallBack(uint64_t value, int width);
+
+
+// Implementation of intrinsics functions.
+// TODO: The implementations could be improved for sizes different from 32bit
+// and 64bit: we could mask the values and call the appropriate builtin.
+
+template <typename V>
+inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLRSB
+  if (width == 32) {
+    return __builtin_clrsb(value);
+  } else if (width == 64) {
+    return __builtin_clrsbll(value);
+  }
+#endif
+  return CountLeadingSignBitsFallBack(value, width);
+}
+
+
+template <typename V>
+inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLZ
+  if (width == 32) {
+    return (value == 0) ? 32 : __builtin_clz(static_cast<uint32_t>(value));
+  } else if (width == 64) {
+    return (value == 0) ? 64 : __builtin_clzll(value);
+  }
+#endif
+  return CountLeadingZerosFallBack(value, width);
+}
+
+
+template <typename V>
+inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_POPCOUNT
+  if (width == 32) {
+    return __builtin_popcount(static_cast<uint32_t>(value));
+  } else if (width == 64) {
+    return __builtin_popcountll(value);
+  }
+#endif
+  return CountSetBitsFallBack(value, width);
+}
+
+
+template <typename V>
+inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CTZ
+  if (width == 32) {
+    return (value == 0) ? 32 : __builtin_ctz(static_cast<uint32_t>(value));
+  } else if (width == 64) {
+    return (value == 0) ? 64 : __builtin_ctzll(value);
+  }
+#endif
+  return CountTrailingZerosFallBack(value, width);
+}
+
+}  // namespace vixl
+
+#endif  // VIXL_COMPILER_INTRINSICS_H
diff --git a/module/src/main/cpp/whale/src/assembler/vixl/cpu-features.cc b/module/src/main/cpp/whale/src/assembler/vixl/cpu-features.cc
new file mode 100644
index 00000000..c3666700
--- /dev/null
+++ b/module/src/main/cpp/whale/src/assembler/vixl/cpu-features.cc
@@ -0,0 +1,211 @@
+// Copyright 2018, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   * Redistributions of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//   * Neither the name of ARM Limited nor the names of its contributors may be
+//     used to endorse or promote products derived from this software without
+//     specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <ostream>
+
+#include "cpu-features.h"
+#include "globals-vixl.h"
+#include "utils-vixl.h"
+
+namespace vixl {
+
+static uint64_t MakeFeatureMask(CPUFeatures::Feature feature) {
+  if (feature == CPUFeatures::kNone) {
+    return 0;
+  } else {
+    // Check that the shift is well-defined, and that the feature is valid.
+    VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures <=
+                       (sizeof(uint64_t) * 8));
+    VIXL_ASSERT(feature < CPUFeatures::kNumberOfFeatures);
+    return UINT64_C(1) << feature;
+  }
+}
+
+CPUFeatures::CPUFeatures(Feature feature0,
+                         Feature feature1,
+                         Feature feature2,
+                         Feature feature3)
+    : features_(0) {
+  Combine(feature0, feature1, feature2, feature3);
+}
+
+CPUFeatures CPUFeatures::All() {
+  CPUFeatures all;
+  // Check that the shift is well-defined.
+  VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures < (sizeof(uint64_t) * 8));
+  all.features_ = (UINT64_C(1) << kNumberOfFeatures) - 1;
+  return all;
+}
+
+CPUFeatures CPUFeatures::InferFromOS() {
+  // TODO: Actually infer features from the OS.
+ return CPUFeatures(); +} + +void CPUFeatures::Combine(const CPUFeatures& other) { + features_ |= other.features_; +} + +void CPUFeatures::Combine(Feature feature0, + Feature feature1, + Feature feature2, + Feature feature3) { + features_ |= MakeFeatureMask(feature0); + features_ |= MakeFeatureMask(feature1); + features_ |= MakeFeatureMask(feature2); + features_ |= MakeFeatureMask(feature3); +} + +void CPUFeatures::Remove(const CPUFeatures& other) { + features_ &= ~other.features_; +} + +void CPUFeatures::Remove(Feature feature0, + Feature feature1, + Feature feature2, + Feature feature3) { + features_ &= ~MakeFeatureMask(feature0); + features_ &= ~MakeFeatureMask(feature1); + features_ &= ~MakeFeatureMask(feature2); + features_ &= ~MakeFeatureMask(feature3); +} + +CPUFeatures CPUFeatures::With(const CPUFeatures& other) const { + CPUFeatures f(*this); + f.Combine(other); + return f; +} + +CPUFeatures CPUFeatures::With(Feature feature0, + Feature feature1, + Feature feature2, + Feature feature3) const { + CPUFeatures f(*this); + f.Combine(feature0, feature1, feature2, feature3); + return f; +} + +CPUFeatures CPUFeatures::Without(const CPUFeatures& other) const { + CPUFeatures f(*this); + f.Remove(other); + return f; +} + +CPUFeatures CPUFeatures::Without(Feature feature0, + Feature feature1, + Feature feature2, + Feature feature3) const { + CPUFeatures f(*this); + f.Remove(feature0, feature1, feature2, feature3); + return f; +} + +bool CPUFeatures::Has(const CPUFeatures& other) const { + return (features_ & other.features_) == other.features_; +} + +bool CPUFeatures::Has(Feature feature0, + Feature feature1, + Feature feature2, + Feature feature3) const { + uint64_t mask = MakeFeatureMask(feature0) | MakeFeatureMask(feature1) | + MakeFeatureMask(feature2) | MakeFeatureMask(feature3); + return (features_ & mask) == mask; +} + +size_t CPUFeatures::Count() const { return CountSetBits(features_); } + +std::ostream& operator<<(std::ostream& os, CPUFeatures::Feature 
feature) { + // clang-format off + switch (feature) { +#define VIXL_FORMAT_FEATURE(SYMBOL, NAME, CPUINFO) \ + case CPUFeatures::SYMBOL: \ + return os << NAME; +VIXL_CPU_FEATURE_LIST(VIXL_FORMAT_FEATURE) +#undef VIXL_FORMAT_FEATURE + case CPUFeatures::kNone: + return os << "none"; + case CPUFeatures::kNumberOfFeatures: + VIXL_UNREACHABLE(); + } + // clang-format on + VIXL_UNREACHABLE(); + return os; +} + +CPUFeatures::const_iterator CPUFeatures::begin() const { + if (features_ == 0) return const_iterator(this, kNone); + + int feature_number = CountTrailingZeros(features_); + vixl::CPUFeatures::Feature feature = + static_cast(feature_number); + return const_iterator(this, feature); +} + +CPUFeatures::const_iterator CPUFeatures::end() const { + return const_iterator(this, kNone); +} + +std::ostream& operator<<(std::ostream& os, const CPUFeatures& features) { + CPUFeatures::const_iterator it = features.begin(); + while (it != features.end()) { + os << *it; + ++it; + if (it != features.end()) os << ", "; + } + return os; +} + +bool CPUFeaturesConstIterator::operator==( + const CPUFeaturesConstIterator& other) const { + VIXL_ASSERT(IsValid()); + return (cpu_features_ == other.cpu_features_) && (feature_ == other.feature_); +} + +CPUFeatures::Feature CPUFeaturesConstIterator::operator++() { // Prefix + VIXL_ASSERT(IsValid()); + do { + // Find the next feature. The order is unspecified. + feature_ = static_cast(feature_ + 1); + if (feature_ == CPUFeatures::kNumberOfFeatures) { + feature_ = CPUFeatures::kNone; + VIXL_STATIC_ASSERT(CPUFeatures::kNone == -1); + } + VIXL_ASSERT(CPUFeatures::kNone <= feature_); + VIXL_ASSERT(feature_ < CPUFeatures::kNumberOfFeatures); + // cpu_features_->Has(kNone) is always true, so this will terminate even if + // the features list is empty. 
+ } while (!cpu_features_->Has(feature_)); + return feature_; +} + +CPUFeatures::Feature CPUFeaturesConstIterator::operator++(int) { // Postfix + CPUFeatures::Feature result = feature_; + ++(*this); + return result; +} + +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/cpu-features.h b/module/src/main/cpp/whale/src/assembler/vixl/cpu-features.h new file mode 100644 index 00000000..fffef44f --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/cpu-features.h @@ -0,0 +1,379 @@ +// Copyright 2018, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_FEATURES_H
+#define VIXL_CPU_FEATURES_H
+
+#include <ostream>
+
+#include "globals-vixl.h"
+
+namespace vixl {
+
+
+// clang-format off
+#define VIXL_CPU_FEATURE_LIST(V)                                               \
+  /* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */  \
+  /* registers, so that the detailed feature registers can be read */          \
+  /* directly. */                                                              \
+  V(kIDRegisterEmulation, "ID register emulation", "cpuid")                    \
+                                                                               \
+  V(kFP, "FP", "fp")                                                           \
+  V(kNEON, "NEON", "asimd")                                                    \
+  V(kCRC32, "CRC32", "crc32")                                                  \
+  /* Cryptographic support instructions. */                                    \
+  V(kAES, "AES", "aes")                                                        \
+  V(kSHA1, "SHA1", "sha1")                                                     \
+  V(kSHA2, "SHA2", "sha2")                                                     \
+  /* A form of PMULL{2} with a 128-bit (1Q) result. */                         \
+  V(kPmull1Q, "Pmull1Q", "pmull")                                              \
+  /* Atomic operations on memory: CAS, LDADD, STADD, SWP, etc. */              \
+  V(kAtomics, "Atomics", "atomics")                                            \
+  /* Limited ordering regions: LDLAR, STLLR and their variants. */             \
+  V(kLORegions, "LORegions", NULL)                                             \
+  /* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */        \
+  V(kRDM, "RDM", "asimdrdm")                                                   \
+  /* Scalable Vector Extension. */                                             \
+  V(kSVE, "SVE", "sve")                                                        \
+  /* SDOT and UDOT support (in NEON). */                                       \
+  V(kDotProduct, "DotProduct", "asimddp")                                      \
+  /* Half-precision (FP16) support for FP and NEON, respectively. */           \
+  V(kFPHalf, "FPHalf", "fphp")                                                 \
+  V(kNEONHalf, "NEONHalf", "asimdhp")                                          \
+  /* The RAS extension, including the ESB instruction. 
*/ \ + V(kRAS, "RAS", NULL) \ + /* Data cache clean to the point of persistence: DC CVAP. */ \ + V(kDCPoP, "DCPoP", "dcpop") \ + /* Cryptographic support instructions. */ \ + V(kSHA3, "SHA3", "sha3") \ + V(kSHA512, "SHA512", "sha512") \ + V(kSM3, "SM3", "sm3") \ + V(kSM4, "SM4", "sm4") \ + /* Pointer authentication for addresses. */ \ + V(kPAuth, "PAuth", NULL) \ + /* Pointer authentication for addresses uses QARMA. */ \ + V(kPAuthQARMA, "PAuthQARMA", NULL) \ + /* Generic authentication (using the PACGA instruction). */ \ + V(kPAuthGeneric, "PAuthGeneric", NULL) \ + /* Generic authentication uses QARMA. */ \ + V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \ + /* JavaScript-style FP -> integer conversion instruction: FJCVTZS. */ \ + V(kJSCVT, "JSCVT", "jscvt") \ + /* Complex number support for NEON: FCMLA and FCADD. */ \ + V(kFcma, "Fcma", "fcma") \ + /* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \ + V(kRCpc, "RCpc", "lrcpc") \ + V(kRCpcImm, "RCpc (imm)", "ilrcpc") \ + /* Flag manipulation instructions: SETF{8,16}, CFINV, RMIF. */ \ + V(kFlagM, "FlagM", "flagm") \ + /* Unaligned single-copy atomicity. */ \ + V(kUSCAT, "USCAT", "uscat") \ + /* FP16 fused multiply-add or -subtract long: FMLAL{2}, FMLSL{2}. */ \ + V(kFHM, "FHM", "asimdfhm") \ + /* Data-independent timing (for selected instructions). */ \ + V(kDIT, "DIT", "dit") \ + /* Branch target identification. */ \ + V(kBTI, "BTI", NULL) \ + /* Flag manipulation instructions: {AX,XA}FLAG */ \ + V(kAXFlag, "AXFlag", NULL) +// clang-format on + + +class CPUFeaturesConstIterator; + +// A representation of the set of features known to be supported by the target +// device. Each feature is represented by a simple boolean flag. +// +// - When the Assembler is asked to assemble an instruction, it asserts (in +// debug mode) that the necessary features are available. 
+// +// - TODO: The MacroAssembler relies on the Assembler's assertions, but in +// some cases it may be useful for macros to generate a fall-back sequence +// in case features are not available. +// +// - The Simulator assumes by default that all features are available, but it +// is possible to configure it to fail if the simulated code uses features +// that are not enabled. +// +// The Simulator also offers pseudo-instructions to allow features to be +// enabled and disabled dynamically. This is useful when you want to ensure +// that some features are constrained to certain areas of code. +// +// - The base Disassembler knows nothing about CPU features, but the +// PrintDisassembler can be configured to annotate its output with warnings +// about unavailable features. The Simulator uses this feature when +// instruction trace is enabled. +// +// - The Decoder-based components -- the Simulator and PrintDisassembler -- +// rely on a CPUFeaturesAuditor visitor. This visitor keeps a list of +// features actually encountered so that a large block of code can be +// examined (either directly or through simulation), and the required +// features analysed later. +// +// Expected usage: +// +// // By default, VIXL uses CPUFeatures::AArch64LegacyBaseline(), for +// // compatibility with older version of VIXL. +// MacroAssembler masm; +// +// // Generate code only for the current CPU. +// masm.SetCPUFeatures(CPUFeatures::InferFromOS()); +// +// // Turn off feature checking entirely. +// masm.SetCPUFeatures(CPUFeatures::All()); +// +// Feature set manipulation: +// +// CPUFeatures f; // The default constructor gives an empty set. +// // Individual features can be added (or removed). +// f.Combine(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::AES); +// f.Remove(CPUFeatures::kNEON); +// +// // Some helpers exist for extensions that provide several features. 
+// f.Remove(CPUFeatures::All()); +// f.Combine(CPUFeatures::AArch64LegacyBaseline()); +// +// // Chained construction is also possible. +// CPUFeatures g = +// f.With(CPUFeatures::kPmull1Q).Without(CPUFeatures::kCRC32); +// +// // Features can be queried. Where multiple features are given, they are +// // combined with logical AND. +// if (h.Has(CPUFeatures::kNEON)) { ... } +// if (h.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { ... } +// if (h.Has(g)) { ... } +// // If the empty set is requested, the result is always 'true'. +// VIXL_ASSERT(h.Has(CPUFeatures())); +// +// // For debug and reporting purposes, features can be enumerated (or +// // printed directly): +// std::cout << CPUFeatures::kNEON; // Prints something like "NEON". +// std::cout << f; // Prints something like "FP, NEON, CRC32". +class CPUFeatures { + public: + // clang-format off + // Individual features. + // These should be treated as opaque tokens. User code should not rely on + // specific numeric values or ordering. + enum Feature { + // Refer to VIXL_CPU_FEATURE_LIST (above) for the list of feature names that + // this class supports. + + kNone = -1, +#define VIXL_DECLARE_FEATURE(SYMBOL, NAME, CPUINFO) SYMBOL, + VIXL_CPU_FEATURE_LIST(VIXL_DECLARE_FEATURE) +#undef VIXL_DECLARE_FEATURE + kNumberOfFeatures + }; + // clang-format on + + // By default, construct with no features enabled. + CPUFeatures() : features_(0) {} + + // Construct with some features already enabled. + CPUFeatures(Feature feature0, + Feature feature1 = kNone, + Feature feature2 = kNone, + Feature feature3 = kNone); + + // Construct with all features enabled. This can be used to disable feature + // checking: `Has(...)` returns true regardless of the argument. + static CPUFeatures All(); + + // Construct an empty CPUFeatures. This is equivalent to the default + // constructor, but is provided for symmetry and convenience. 
+ static CPUFeatures None() { return CPUFeatures(); } + + // The presence of these features was assumed by version of VIXL before this + // API was added, so using this set by default ensures API compatibility. + static CPUFeatures AArch64LegacyBaseline() { + return CPUFeatures(kFP, kNEON, kCRC32); + } + + // Construct a new CPUFeatures object based on what the OS reports. + static CPUFeatures InferFromOS(); + + // Combine another CPUFeatures object into this one. Features that already + // exist in this set are left unchanged. + void Combine(const CPUFeatures& other); + + // Combine specific features into this set. Features that already exist in + // this set are left unchanged. + void Combine(Feature feature0, + Feature feature1 = kNone, + Feature feature2 = kNone, + Feature feature3 = kNone); + + // Remove features in another CPUFeatures object from this one. + void Remove(const CPUFeatures& other); + + // Remove specific features from this set. + void Remove(Feature feature0, + Feature feature1 = kNone, + Feature feature2 = kNone, + Feature feature3 = kNone); + + // Chaining helpers for convenient construction. + CPUFeatures With(const CPUFeatures& other) const; + CPUFeatures With(Feature feature0, + Feature feature1 = kNone, + Feature feature2 = kNone, + Feature feature3 = kNone) const; + CPUFeatures Without(const CPUFeatures& other) const; + CPUFeatures Without(Feature feature0, + Feature feature1 = kNone, + Feature feature2 = kNone, + Feature feature3 = kNone) const; + + // Query features. + // Note that an empty query (like `Has(kNone)`) always returns true. + bool Has(const CPUFeatures& other) const; + bool Has(Feature feature0, + Feature feature1 = kNone, + Feature feature2 = kNone, + Feature feature3 = kNone) const; + + // Return the number of enabled features. + size_t Count() const; + + // Check for equivalence. 
+  bool operator==(const CPUFeatures& other) const {
+    return Has(other) && other.Has(*this);
+  }
+  bool operator!=(const CPUFeatures& other) const { return !(*this == other); }
+
+  typedef CPUFeaturesConstIterator const_iterator;
+
+  const_iterator begin() const;
+  const_iterator end() const;
+
+ private:
+  // Each bit represents a feature. This field will be replaced as needed if
+  // features are added.
+  uint64_t features_;
+
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const vixl::CPUFeatures& features);
+};
+
+std::ostream& operator<<(std::ostream& os, vixl::CPUFeatures::Feature feature);
+std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features);
+
+// This is not a proper C++ iterator type, but it simulates enough of
+// ForwardIterator that simple loops can be written.
+class CPUFeaturesConstIterator {
+ public:
+  CPUFeaturesConstIterator(const CPUFeatures* cpu_features = NULL,
+                           CPUFeatures::Feature start = CPUFeatures::kNone)
+      : cpu_features_(cpu_features), feature_(start) {
+    VIXL_ASSERT(IsValid());
+  }
+
+  bool operator==(const CPUFeaturesConstIterator& other) const;
+  bool operator!=(const CPUFeaturesConstIterator& other) const {
+    return !(*this == other);
+  }
+  CPUFeatures::Feature operator++();
+  CPUFeatures::Feature operator++(int);
+
+  CPUFeatures::Feature operator*() const {
+    VIXL_ASSERT(IsValid());
+    return feature_;
+  }
+
+  // For proper support of C++'s simplest "Iterator" concept, this class would
+  // have to define member types (such as CPUFeaturesIterator::pointer) to make
+  // it appear as if it iterates over Feature objects in memory. That is, we'd
+  // need CPUFeatures::iterator to behave like std::vector<Feature>::iterator.
+  // This is at least partially possible -- the std::vector<bool> specialisation
+  // does something similar -- but it doesn't seem worthwhile for a
+  // special-purpose debug helper, so they are omitted here.
+ private:
+  const CPUFeatures* cpu_features_;
+  CPUFeatures::Feature feature_;
+
+  bool IsValid() const {
+    return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
+           cpu_features_->Has(feature_);
+  }
+};
+
+// A convenience scope for temporarily modifying a CPU features object. This
+// allows features to be enabled for short sequences.
+//
+// Expected usage:
+//
+//  {
+//    CPUFeaturesScope cpu(&masm, CPUFeatures::kCRC32);
+//    // This scope can now use CRC32, as well as anything else that was enabled
+//    // before the scope.
+//
+//    ...
+//
+//    // At the end of the scope, the original CPU features are restored.
+//  }
class CPUFeaturesScope {
+ public:
+  // Start a CPUFeaturesScope on any object that implements
+  // `CPUFeatures* GetCPUFeatures()`.
+  template <typename T>
+  explicit CPUFeaturesScope(T* cpu_features_wrapper,
+                            CPUFeatures::Feature feature0 = CPUFeatures::kNone,
+                            CPUFeatures::Feature feature1 = CPUFeatures::kNone,
+                            CPUFeatures::Feature feature2 = CPUFeatures::kNone,
+                            CPUFeatures::Feature feature3 = CPUFeatures::kNone)
+      : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+        old_features_(*cpu_features_) {
+    cpu_features_->Combine(feature0, feature1, feature2, feature3);
+  }
+
+  template <typename T>
+  CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other)
+      : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+        old_features_(*cpu_features_) {
+    cpu_features_->Combine(other);
+  }
+
+  ~CPUFeaturesScope() { *cpu_features_ = old_features_; }
+
+  // For advanced usage, the CPUFeatures object can be accessed directly.
+  // The scope will restore the original state when it ends.
+ + CPUFeatures* GetCPUFeatures() const { return cpu_features_; } + + void SetCPUFeatures(const CPUFeatures& cpu_features) { + *cpu_features_ = cpu_features; + } + + private: + CPUFeatures* const cpu_features_; + const CPUFeatures old_features_; +}; + + +} // namespace vixl + +#endif // VIXL_CPU_FEATURES_H diff --git a/module/src/main/cpp/whale/src/assembler/vixl/globals-vixl.h b/module/src/main/cpp/whale/src/assembler/vixl/globals-vixl.h new file mode 100644 index 00000000..9e4229a4 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/globals-vixl.h @@ -0,0 +1,283 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_GLOBALS_H +#define VIXL_GLOBALS_H + +// Get standard C99 macros for integer types. +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +extern "C" { +#include +#include +} + +#include +#include +#include +#include +#include + +#include "platform-vixl.h" + +#ifdef VIXL_NEGATIVE_TESTING +#include +#include +#include +#endif + +namespace vixl { + +typedef uint8_t byte; + +const int KBytes = 1024; +const int MBytes = 1024 * KBytes; + +const int kBitsPerByte = 8; + +template +struct Unsigned; + +template <> +struct Unsigned<32> { + typedef uint32_t type; +}; + +template <> +struct Unsigned<64> { + typedef uint64_t type; +}; + +} // namespace vixl + +// Detect the host's pointer size. +#if (UINTPTR_MAX == UINT32_MAX) +#define VIXL_HOST_POINTER_32 +#elif (UINTPTR_MAX == UINT64_MAX) +#define VIXL_HOST_POINTER_64 +#else +#error "Unsupported host pointer size." 
+// we can switch to a different implementation.
+#define VIXL_CONCAT(a, b) a##b +#if __cplusplus >= 201103L +#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \ + static_assert(condition, message) +#else +#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused) \ + typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \ + __attribute__((unused)) +#endif +#define VIXL_STATIC_ASSERT(condition) \ + VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "") +#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \ + VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message) + +#define VIXL_WARNING(message) \ + do { \ + printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \ + } while (false) + +template +inline void USE(const T1&) {} + +template +inline void USE(const T1&, const T2&) {} + +template +inline void USE(const T1&, const T2&, const T3&) {} + +template +inline void USE(const T1&, const T2&, const T3&, const T4&) {} + +#define VIXL_ALIGNMENT_EXCEPTION() \ + do { \ + VIXL_ABORT_WITH_MSG("ALIGNMENT EXCEPTION\t"); \ + } while (0) + +// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough +// argument to annotate intentional fall-through between switch labels. +// For more information please refer to: +// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +#ifndef __has_warning +#define __has_warning(x) 0 +#endif + +// Fallthrough annotation for Clang and C++11(201103L). +#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L +#define VIXL_FALLTHROUGH() [[clang::fallthrough]] +// Fallthrough annotation for GCC >= 7. 
+#elif __GNUC__ >= 7 +#define VIXL_FALLTHROUGH() __attribute__((fallthrough)) +#else +#define VIXL_FALLTHROUGH() \ + do { \ + } while (0) +#endif + +#if __cplusplus >= 201103L +#define VIXL_NO_RETURN [[noreturn]] +#else +#define VIXL_NO_RETURN __attribute__((noreturn)) +#endif +#ifdef VIXL_DEBUG +#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN +#else +#define VIXL_NO_RETURN_IN_DEBUG_MODE +#endif + +#if __cplusplus >= 201103L +#define VIXL_OVERRIDE override +#else +#define VIXL_OVERRIDE +#endif + +// Some functions might only be marked as "noreturn" for the DEBUG build. This +// macro should be used for such cases (for more details see what +// VIXL_UNREACHABLE expands to). +#ifdef VIXL_DEBUG +#define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN +#else +#define VIXL_DEBUG_NO_RETURN +#endif + +#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64 +#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE +#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1 +#endif +#else +#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE +#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0 +#endif +#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE +#warning "Generating Simulator instructions without Simulator support." +#endif +#endif + +// We do not have a simulator for AArch32, although we can pretend we do so that +// tests that require running natively can be skipped. +#ifndef __arm__ +#define VIXL_INCLUDE_SIMULATOR_AARCH32 +#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE +#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1 +#endif +#else +#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE +#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0 +#endif +#endif + +#ifdef USE_SIMULATOR +#error "Please see the release notes for USE_SIMULATOR." 
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_INVALSET_H_ +#define VIXL_INVALSET_H_ + +#include + +#include +#include + +#include "globals-vixl.h" + +namespace vixl { + +// We define a custom data structure template and its iterator as `std` +// containers do not fit the performance requirements for some of our use cases. +// +// The structure behaves like an iterable unordered set with special properties +// and restrictions. "InvalSet" stands for "Invalidatable Set". +// +// Restrictions and requirements: +// - Adding an element already present in the set is illegal. In debug mode, +// this is checked at insertion time. +// - The templated class `ElementType` must provide comparison operators so that +// `std::sort()` can be used. +// - A key must be available to represent invalid elements. +// - Elements with an invalid key must compare higher or equal to any other +// element. +// +// Use cases and performance considerations: +// Our use cases present two specificities that allow us to design this +// structure to provide fast insertion *and* fast search and deletion +// operations: +// - Elements are (generally) inserted in order (sorted according to their key). +// - A key is available to mark elements as invalid (deleted). +// The backing `std::vector` allows for fast insertions. When +// searching for an element we ensure the elements are sorted (this is generally +// the case) and perform a binary search. 
+// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
+// greater than `<total number of elements>` / RECLAIM_FACTOR.
+ bool empty() const; + + void clear(); + + const ElementType GetMinElement(); + + // This returns the key of the minimum element in the set. + KeyType GetMinElementKey(); + + static bool IsValid(const ElementType& element); + static KeyType GetKey(const ElementType& element); + static void SetKey(ElementType* element, KeyType key); + + typedef ElementType _ElementType; + typedef KeyType _KeyType; + + protected: + // Returns a pointer to the element in vector_ if it was found, or NULL + // otherwise. + ElementType* Search(const ElementType& element); + + // The argument *must* point to an element stored in *this* set. + // This function is not allowed to move elements in the backing vector + // storage. + void EraseInternal(ElementType* element); + + // The elements in the range searched must be sorted. + ElementType* BinarySearch(const ElementType& element, + ElementType* start, + ElementType* end) const; + + // Sort the elements. + enum SortType { + // The 'hard' version guarantees that invalid elements are moved to the end + // of the container. + kHardSort, + // The 'soft' version only guarantees that the elements will be sorted. + // Invalid elements may still be present anywhere in the set. + kSoftSort + }; + void Sort(SortType sort_type); + + // Delete the elements that have an invalid key. The complexity is linear + // with the size of the vector. + void Clean(); + + const ElementType Front() const; + const ElementType Back() const; + + // Delete invalid trailing elements and return the last valid element in the + // set. + const ElementType CleanBack(); + + // Returns a pointer to the start or end of the backing storage. + const ElementType* StorageBegin() const; + const ElementType* StorageEnd() const; + ElementType* StorageBegin(); + ElementType* StorageEnd(); + + // Returns the index of the element within the backing storage. The element + // must belong to the backing storage. 
+ size_t GetElementIndex(const ElementType* element) const; + + // Returns the element at the specified index in the backing storage. + const ElementType* GetElementAt(size_t index) const; + ElementType* GetElementAt(size_t index); + + static const ElementType* GetFirstValidElement(const ElementType* from, + const ElementType* end); + + void CacheMinElement(); + const ElementType GetCachedMinElement() const; + + bool ShouldReclaimMemory() const; + void ReclaimMemory(); + + bool IsUsingVector() const { return vector_ != NULL; } + void SetSorted(bool sorted) { sorted_ = sorted; } + + // We cache some data commonly required by users to improve performance. + // We cannot cache pointers to elements as we do not control the backing + // storage. + bool valid_cached_min_; + size_t cached_min_index_; // Valid iff `valid_cached_min_` is true. + KeyType cached_min_key_; // Valid iff `valid_cached_min_` is true. + + // Indicates whether the elements are sorted. + bool sorted_; + + // This represents the number of (valid) elements in this set. + size_t size_; + + // The backing storage is either the array of preallocated elements or the + // vector. The structure starts by using the preallocated elements, and + // transitions (permanently) to using the vector once more than + // kNPreallocatedElements are used. + // Elements are only invalidated when using the vector. The preallocated + // storage always only contains valid elements. + ElementType preallocated_[kNPreallocatedElements]; + std::vector* vector_; + + // Iterators acquire and release this monitor. While a set is acquired, + // certain operations are illegal to ensure that the iterator will + // correctly iterate over the elements in the set. 
+ int monitor_; +#ifdef VIXL_DEBUG + int monitor() const { return monitor_; } + void Acquire() { monitor_++; } + void Release() { + monitor_--; + VIXL_ASSERT(monitor_ >= 0); + } +#endif + + private: +// The copy constructor and assignment operator are not used and the defaults +// are unsafe, so disable them (without an implementation). +#if __cplusplus >= 201103L + InvalSet(const InvalSet& other) = delete; + InvalSet operator=(const InvalSet& other) = delete; +#else + InvalSet(const InvalSet& other); + InvalSet operator=(const InvalSet& other); +#endif + + friend class InvalSetIterator >; +}; + + +template +class InvalSetIterator : public std::iterator { + private: + // Redefine types to mirror the associated set types. + typedef typename S::_ElementType ElementType; + typedef typename S::_KeyType KeyType; + + public: + explicit InvalSetIterator(S* inval_set = NULL); + + // This class implements the standard copy-swap idiom. + ~InvalSetIterator(); + InvalSetIterator(const InvalSetIterator& other); + InvalSetIterator& operator=(InvalSetIterator other); +#if __cplusplus >= 201103L + InvalSetIterator(InvalSetIterator&& other) noexcept; +#endif + + friend void swap(InvalSetIterator& a, InvalSetIterator& b) { + using std::swap; + swap(a.using_vector_, b.using_vector_); + swap(a.index_, b.index_); + swap(a.inval_set_, b.inval_set_); + } + + // Return true if the iterator is at the end of the set. + bool Done() const; + + // Move this iterator to the end of the set. + void Finish(); + + // Delete the current element and advance the iterator to point to the next + // element. + void DeleteCurrentAndAdvance(); + + static bool IsValid(const ElementType& element); + static KeyType GetKey(const ElementType& element); + + // Extra helpers to support the forward-iterator interface. + InvalSetIterator& operator++(); // Pre-increment. + InvalSetIterator operator++(int); // Post-increment. 
+ bool operator==(const InvalSetIterator& rhs) const; + bool operator!=(const InvalSetIterator& rhs) const { + return !(*this == rhs); + } + ElementType& operator*() { return *Current(); } + const ElementType& operator*() const { return *Current(); } + ElementType* operator->() { return Current(); } + const ElementType* operator->() const { return Current(); } + + protected: + void MoveToValidElement(); + + // Indicates if the iterator is looking at the vector or at the preallocated + // elements. + bool using_vector_; + // Used when looking at the preallocated elements, or in debug mode when using + // the vector to track how many times the iterator has advanced. + size_t index_; + typename std::vector::iterator iterator_; + S* inval_set_; + + // TODO: These helpers are deprecated and will be removed in future versions + // of VIXL. + ElementType* Current() const; + void Advance(); +}; + + +template +InvalSet::InvalSet() + : valid_cached_min_(false), sorted_(true), size_(0), vector_(NULL) { +#ifdef VIXL_DEBUG + monitor_ = 0; +#endif +} + + +template +InvalSet::~InvalSet() { + VIXL_ASSERT(monitor_ == 0); + delete vector_; +} + + +template +typename InvalSet::iterator +InvalSet::begin() { + return iterator(this); +} + + +template +typename InvalSet::iterator +InvalSet::end() { + iterator end(this); + end.Finish(); + return end; +} + + +template +void InvalSet::insert(const ElementType& element) { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(IsValid(element)); + VIXL_ASSERT(Search(element) == NULL); + SetSorted(empty() || (sorted_ && (element > CleanBack()))); + if (IsUsingVector()) { + vector_->push_back(element); + } else { + if (size_ < kNPreallocatedElements) { + preallocated_[size_] = element; + } else { + // Transition to using the vector. + vector_ = + new std::vector(preallocated_, preallocated_ + size_); + vector_->push_back(element); + } + } + size_++; + + if (valid_cached_min_ && (element < GetMinElement())) { + cached_min_index_ = IsUsingVector() ? 
vector_->size() - 1 : size_ - 1; + cached_min_key_ = GetKey(element); + valid_cached_min_ = true; + } + + if (ShouldReclaimMemory()) { + ReclaimMemory(); + } +} + + +template +size_t InvalSet::erase(const ElementType& element) { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(IsValid(element)); + ElementType* local_element = Search(element); + if (local_element != NULL) { + EraseInternal(local_element); + return 1; + } + return 0; +} + + +template +ElementType* InvalSet::Search( + const ElementType& element) { + VIXL_ASSERT(monitor() == 0); + if (empty()) { + return NULL; + } + if (ShouldReclaimMemory()) { + ReclaimMemory(); + } + if (!sorted_) { + Sort(kHardSort); + } + if (!valid_cached_min_) { + CacheMinElement(); + } + return BinarySearch(element, GetElementAt(cached_min_index_), StorageEnd()); +} + + +template +size_t InvalSet::size() const { + return size_; +} + + +template +bool InvalSet::empty() const { + return size_ == 0; +} + + +template +void InvalSet::clear() { + VIXL_ASSERT(monitor() == 0); + size_ = 0; + if (IsUsingVector()) { + vector_->clear(); + } + SetSorted(true); + valid_cached_min_ = false; +} + + +template +const ElementType InvalSet::GetMinElement() { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(!empty()); + CacheMinElement(); + return *GetElementAt(cached_min_index_); +} + + +template +KeyType InvalSet::GetMinElementKey() { + VIXL_ASSERT(monitor() == 0); + if (valid_cached_min_) { + return cached_min_key_; + } else { + return GetKey(GetMinElement()); + } +} + + +template +bool InvalSet::IsValid(const ElementType& element) { + return GetKey(element) != kInvalidKey; +} + + +template +void InvalSet::EraseInternal(ElementType* element) { + // Note that this function must be safe even while an iterator has acquired + // this set. 
+ VIXL_ASSERT(element != NULL); + size_t deleted_index = GetElementIndex(element); + if (IsUsingVector()) { + VIXL_ASSERT((&(vector_->front()) <= element) && + (element <= &(vector_->back()))); + SetKey(element, kInvalidKey); + } else { + VIXL_ASSERT((preallocated_ <= element) && + (element < (preallocated_ + kNPreallocatedElements))); + ElementType* end = preallocated_ + kNPreallocatedElements; + size_t copy_size = sizeof(*element) * (end - element - 1); + memmove(element, element + 1, copy_size); + } + size_--; + + if (valid_cached_min_ && (deleted_index == cached_min_index_)) { + if (sorted_ && !empty()) { + const ElementType* min = GetFirstValidElement(element, StorageEnd()); + cached_min_index_ = GetElementIndex(min); + cached_min_key_ = GetKey(*min); + valid_cached_min_ = true; + } else { + valid_cached_min_ = false; + } + } +} + + +template +ElementType* InvalSet::BinarySearch( + const ElementType& element, ElementType* start, ElementType* end) const { + if (start == end) { + return NULL; + } + VIXL_ASSERT(sorted_); + VIXL_ASSERT(start < end); + VIXL_ASSERT(!empty()); + + // Perform a binary search through the elements while ignoring invalid + // elements. + ElementType* elements = start; + size_t low = 0; + size_t high = (end - start) - 1; + while (low < high) { + // Find valid bounds. + while (!IsValid(elements[low]) && (low < high)) ++low; + while (!IsValid(elements[high]) && (low < high)) --high; + VIXL_ASSERT(low <= high); + // Avoid overflow when computing the middle index. 
+ size_t middle = low + (high - low) / 2; + if ((middle == low) || (middle == high)) { + break; + } + while ((middle < high - 1) && !IsValid(elements[middle])) ++middle; + while ((low + 1 < middle) && !IsValid(elements[middle])) --middle; + if (!IsValid(elements[middle])) { + break; + } + if (elements[middle] < element) { + low = middle; + } else { + high = middle; + } + } + + if (elements[low] == element) return &elements[low]; + if (elements[high] == element) return &elements[high]; + return NULL; +} + + +template +void InvalSet::Sort(SortType sort_type) { + if (sort_type == kSoftSort) { + if (sorted_) { + return; + } + } + VIXL_ASSERT(monitor() == 0); + if (empty()) { + return; + } + + Clean(); + std::sort(StorageBegin(), StorageEnd()); + + SetSorted(true); + cached_min_index_ = 0; + cached_min_key_ = GetKey(Front()); + valid_cached_min_ = true; +} + + +template +void InvalSet::Clean() { + VIXL_ASSERT(monitor() == 0); + if (empty() || !IsUsingVector()) { + return; + } + // Manually iterate through the vector storage to discard invalid elements. + ElementType* start = &(vector_->front()); + ElementType* end = start + vector_->size(); + ElementType* c = start; + ElementType* first_invalid; + ElementType* first_valid; + ElementType* next_invalid; + + while ((c < end) && IsValid(*c)) c++; + first_invalid = c; + + while (c < end) { + while ((c < end) && !IsValid(*c)) c++; + first_valid = c; + while ((c < end) && IsValid(*c)) c++; + next_invalid = c; + + ptrdiff_t n_moved_elements = (next_invalid - first_valid); + memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c)); + first_invalid = first_invalid + n_moved_elements; + c = next_invalid; + } + + // Delete the trailing invalid elements. 
+ vector_->erase(vector_->begin() + (first_invalid - start), vector_->end()); + VIXL_ASSERT(vector_->size() == size_); + + if (sorted_) { + valid_cached_min_ = true; + cached_min_index_ = 0; + cached_min_key_ = GetKey(*GetElementAt(0)); + } else { + valid_cached_min_ = false; + } +} + + +template +const ElementType InvalSet::Front() const { + VIXL_ASSERT(!empty()); + return IsUsingVector() ? vector_->front() : preallocated_[0]; +} + + +template +const ElementType InvalSet::Back() const { + VIXL_ASSERT(!empty()); + return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1]; +} + + +template +const ElementType InvalSet::CleanBack() { + VIXL_ASSERT(monitor() == 0); + if (IsUsingVector()) { + // Delete the invalid trailing elements. + typename std::vector::reverse_iterator it = vector_->rbegin(); + while (!IsValid(*it)) { + it++; + } + vector_->erase(it.base(), vector_->end()); + } + return Back(); +} + + +template +const ElementType* InvalSet::StorageBegin() const { + return IsUsingVector() ? &(vector_->front()) : preallocated_; +} + + +template +const ElementType* InvalSet::StorageEnd() const { + return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_; +} + + +template +ElementType* InvalSet::StorageBegin() { + return IsUsingVector() ? &(vector_->front()) : preallocated_; +} + + +template +ElementType* InvalSet::StorageEnd() { + return IsUsingVector() ? 
&(vector_->back()) + 1 : preallocated_ + size_; +} + + +template +size_t InvalSet::GetElementIndex( + const ElementType* element) const { + VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd())); + return element - StorageBegin(); +} + + +template +const ElementType* InvalSet::GetElementAt( + size_t index) const { + VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) || + (index < size_)); + return StorageBegin() + index; +} + +template +ElementType* InvalSet::GetElementAt(size_t index) { + VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) || + (index < size_)); + return StorageBegin() + index; +} + +template +const ElementType* InvalSet::GetFirstValidElement( + const ElementType* from, const ElementType* end) { + while ((from < end) && !IsValid(*from)) { + from++; + } + return from; +} + + +template +void InvalSet::CacheMinElement() { + VIXL_ASSERT(monitor() == 0); + VIXL_ASSERT(!empty()); + + if (valid_cached_min_) { + return; + } + + if (sorted_) { + const ElementType* min = GetFirstValidElement(StorageBegin(), StorageEnd()); + cached_min_index_ = GetElementIndex(min); + cached_min_key_ = GetKey(*min); + valid_cached_min_ = true; + } else { + Sort(kHardSort); + } + VIXL_ASSERT(valid_cached_min_); +} + + +template +bool InvalSet::ShouldReclaimMemory() const { + if (!IsUsingVector()) { + return false; + } + size_t n_invalid_elements = vector_->size() - size_; + return (n_invalid_elements > RECLAIM_FROM) && + (n_invalid_elements > vector_->size() / RECLAIM_FACTOR); +} + + +template +void InvalSet::ReclaimMemory() { + VIXL_ASSERT(monitor() == 0); + Clean(); +} + + +template +InvalSetIterator::InvalSetIterator(S* inval_set) + : using_vector_((inval_set != NULL) && inval_set->IsUsingVector()), + index_(0), + inval_set_(inval_set) { + if (inval_set != NULL) { + inval_set->Sort(S::kSoftSort); +#ifdef VIXL_DEBUG + inval_set->Acquire(); +#endif + if (using_vector_) { + iterator_ = typename std::vector::iterator( + 
inval_set_->vector_->begin()); + } + MoveToValidElement(); + } +} + + +template +InvalSetIterator::~InvalSetIterator() { +#ifdef VIXL_DEBUG + if (inval_set_ != NULL) inval_set_->Release(); +#endif +} + + +template +typename S::_ElementType* InvalSetIterator::Current() const { + VIXL_ASSERT(!Done()); + if (using_vector_) { + return &(*iterator_); + } else { + return &(inval_set_->preallocated_[index_]); + } +} + + +template +void InvalSetIterator::Advance() { + ++(*this); +} + + +template +bool InvalSetIterator::Done() const { + if (using_vector_) { + bool done = (iterator_ == inval_set_->vector_->end()); + VIXL_ASSERT(done == (index_ == inval_set_->size())); + return done; + } else { + return index_ == inval_set_->size(); + } +} + + +template +void InvalSetIterator::Finish() { + VIXL_ASSERT(inval_set_->sorted_); + if (using_vector_) { + iterator_ = inval_set_->vector_->end(); + } + index_ = inval_set_->size(); +} + + +template +void InvalSetIterator::DeleteCurrentAndAdvance() { + if (using_vector_) { + inval_set_->EraseInternal(&(*iterator_)); + MoveToValidElement(); + } else { + inval_set_->EraseInternal(inval_set_->preallocated_ + index_); + } +} + + +template +bool InvalSetIterator::IsValid(const ElementType& element) { + return S::IsValid(element); +} + + +template +typename S::_KeyType InvalSetIterator::GetKey(const ElementType& element) { + return S::GetKey(element); +} + + +template +void InvalSetIterator::MoveToValidElement() { + if (using_vector_) { + while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) { + iterator_++; + } + } else { + VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0])); + // Nothing to do. 
+ } +} + + +template +InvalSetIterator::InvalSetIterator(const InvalSetIterator& other) + : using_vector_(other.using_vector_), + index_(other.index_), + inval_set_(other.inval_set_) { +#ifdef VIXL_DEBUG + if (inval_set_ != NULL) inval_set_->Acquire(); +#endif +} + + +#if __cplusplus >= 201103L +template +InvalSetIterator::InvalSetIterator(InvalSetIterator&& other) noexcept + : using_vector_(false), + index_(0), + inval_set_(NULL) { + swap(*this, other); +} +#endif + + +template +InvalSetIterator& InvalSetIterator::operator=(InvalSetIterator other) { + swap(*this, other); + return *this; +} + + +template +bool InvalSetIterator::operator==(const InvalSetIterator& rhs) const { + bool equal = (inval_set_ == rhs.inval_set_); + + // If the inval_set_ matches, using_vector_ must also match. + VIXL_ASSERT(!equal || (using_vector_ == rhs.using_vector_)); + + if (using_vector_) { + equal = equal && (iterator_ == rhs.iterator_); + // In debug mode, index_ is maintained even with using_vector_. + VIXL_ASSERT(!equal || (index_ == rhs.index_)); + } else { + equal = equal && (index_ == rhs.index_); +#ifdef DEBUG + // If not using_vector_, iterator_ should be default-initialised. + typename std::vector::iterator default_iterator; + VIXL_ASSERT(iterator_ == default_iterator); + VIXL_ASSERT(rhs.iterator_ == default_iterator); +#endif + } + return equal; +} + + +template +InvalSetIterator& InvalSetIterator::operator++() { + // Pre-increment. + VIXL_ASSERT(!Done()); + if (using_vector_) { + iterator_++; +#ifdef VIXL_DEBUG + index_++; +#endif + MoveToValidElement(); + } else { + index_++; + } + return *this; +} + + +template +InvalSetIterator InvalSetIterator::operator++(int /* unused */) { + // Post-increment. 
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_MACRO_ASSEMBLER_INTERFACE_H +#define VIXL_MACRO_ASSEMBLER_INTERFACE_H + +#include "assembler-base-vixl.h" + +namespace vixl { + +class MacroAssemblerInterface { + public: + virtual internal::AssemblerBase* AsAssemblerBase() = 0; + + virtual ~MacroAssemblerInterface() {} + + virtual bool AllowMacroInstructions() const = 0; + virtual bool ArePoolsBlocked() const = 0; + + protected: + virtual void SetAllowMacroInstructions(bool allow) = 0; + + virtual void BlockPools() = 0; + virtual void ReleasePools() = 0; + virtual void EnsureEmitPoolsFor(size_t size) = 0; + + // Emit the branch over a literal/veneer pool, and any necessary padding + // before it. + virtual void EmitPoolHeader() = 0; + // When this is called, the label used for branching over the pool is bound. + // This can also generate additional padding, which must correspond to the + // alignment_ value passed to the PoolManager (which needs to keep track of + // the exact size of the generated pool). + virtual void EmitPoolFooter() = 0; + + // Emit n bytes of padding that does not have to be executable. + virtual void EmitPaddingBytes(int n) = 0; + // Emit n bytes of padding that has to be executable. Implementations must + // make sure this is a multiple of the instruction size. + virtual void EmitNopBytes(int n) = 0; + + // The following scopes need access to the above method in order to implement + // pool blocking and temporarily disable the macro-assembler. 
+ friend class ExactAssemblyScope; + friend class EmissionCheckScope; + template + friend class PoolManager; +}; + +} // namespace vixl + +#endif // VIXL_MACRO_ASSEMBLER_INTERFACE_H diff --git a/module/src/main/cpp/whale/src/assembler/vixl/platform-vixl.h b/module/src/main/cpp/whale/src/assembler/vixl/platform-vixl.h new file mode 100644 index 00000000..99f54d0c --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/platform-vixl.h @@ -0,0 +1,39 @@ +// Copyright 2014, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef PLATFORM_H +#define PLATFORM_H + +// Define platform specific functionalities. +extern "C" { +#include +} + +namespace vixl { +inline void HostBreakpoint() { raise(SIGINT); } +} // namespace vixl + +#endif diff --git a/module/src/main/cpp/whale/src/assembler/vixl/pool-manager-impl.h b/module/src/main/cpp/whale/src/assembler/vixl/pool-manager-impl.h new file mode 100644 index 00000000..c49b643f --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/pool-manager-impl.h @@ -0,0 +1,522 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_POOL_MANAGER_IMPL_H_ +#define VIXL_POOL_MANAGER_IMPL_H_ + +#include "pool-manager.h" + +#include +#include "assembler-base-vixl.h" + +namespace vixl { + + +template +T PoolManager::Emit(MacroAssemblerInterface* masm, + T pc, + int num_bytes, + ForwardReference* new_reference, + LocationBase* new_object, + EmitOption option) { + // Make sure that the buffer still has the alignment we think it does. + VIXL_ASSERT(IsAligned(masm->AsAssemblerBase() + ->GetBuffer() + ->GetStartAddress(), + buffer_alignment_)); + + // We should not call this method when the pools are blocked. + VIXL_ASSERT(!IsBlocked()); + if (objects_.empty()) return pc; + + // Emit header. + if (option == kBranchRequired) { + masm->EmitPoolHeader(); + // TODO: The pc at this point might not actually be aligned according to + // alignment_. This is to support the current AARCH32 MacroAssembler which + // does not have a fixed size instruction set. In practice, the pc will be + // aligned to the alignment instructions need for the current instruction + // set, so we do not need to align it here. 
All other calculations do take + // the alignment into account, which only makes the checkpoint calculations + // more conservative when we use T32. Uncomment the following assertion if + // the AARCH32 MacroAssembler is modified to only support one ISA at the + // time. + // VIXL_ASSERT(pc == AlignUp(pc, alignment_)); + pc += header_size_; + } else { + // If the header is optional, we might need to add some extra padding to + // meet the minimum location of the first object. + if (pc < objects_[0].min_location_) { + int32_t padding = objects_[0].min_location_ - pc; + masm->EmitNopBytes(padding); + pc += padding; + } + } + + PoolObject* existing_object = GetObjectIfTracked(new_object); + + // Go through all objects and emit one by one. + for (objects_iter iter = objects_.begin(); iter != objects_.end();) { + PoolObject& current = *iter; + if (ShouldSkipObject(¤t, + pc, + num_bytes, + new_reference, + new_object, + existing_object)) { + ++iter; + continue; + } + LocationBase* label_base = current.label_base_; + T aligned_pc = AlignUp(pc, current.alignment_); + masm->EmitPaddingBytes(aligned_pc - pc); + pc = aligned_pc; + VIXL_ASSERT(pc >= current.min_location_); + VIXL_ASSERT(pc <= current.max_location_); + // First call SetLocation, which will also resolve the references, and then + // call EmitPoolObject, which might add a new reference. + label_base->SetLocation(masm->AsAssemblerBase(), pc); + label_base->EmitPoolObject(masm); + int object_size = label_base->GetPoolObjectSizeInBytes(); + if (label_base->ShouldDeletePoolObjectOnPlacement()) { + label_base->MarkBound(); + iter = RemoveAndDelete(iter); + } else { + VIXL_ASSERT(!current.label_base_->ShouldDeletePoolObjectOnPlacement()); + current.label_base_->UpdatePoolObject(¤t); + VIXL_ASSERT(current.alignment_ >= label_base->GetPoolObjectAlignment()); + ++iter; + } + pc += object_size; + } + + // Recalculate the checkpoint before emitting the footer. 
The footer might + // call Bind() which will check if we need to emit. + RecalculateCheckpoint(); + + // Always emit footer - this might add some padding. + masm->EmitPoolFooter(); + pc = AlignUp(pc, alignment_); + + return pc; +} + +template +bool PoolManager::ShouldSkipObject(PoolObject* pool_object, + T pc, + int num_bytes, + ForwardReference* new_reference, + LocationBase* new_object, + PoolObject* existing_object) const { + // We assume that all objects before this have been skipped and all objects + // after this will be emitted, therefore we will emit the whole pool. Add + // the header size and alignment, as well as the number of bytes we are + // planning to emit. + T max_actual_location = pc + num_bytes + max_pool_size_; + + if (new_reference != NULL) { + // If we're adding a new object, also assume that it will have to be emitted + // before the object we are considering to skip. + VIXL_ASSERT(new_object != NULL); + T new_object_alignment = std::max(new_reference->object_alignment_, + new_object->GetPoolObjectAlignment()); + if ((existing_object != NULL) && + (existing_object->alignment_ > new_object_alignment)) { + new_object_alignment = existing_object->alignment_; + } + max_actual_location += + (new_object->GetPoolObjectSizeInBytes() + new_object_alignment - 1); + } + + // Hard limit. + if (max_actual_location >= pool_object->max_location_) return false; + + // Use heuristic. 
+ return (pc < pool_object->skip_until_location_hint_); +} + +template +T PoolManager::UpdateCheckpointForObject(T checkpoint, + const PoolObject* object) { + checkpoint -= object->label_base_->GetPoolObjectSizeInBytes(); + if (checkpoint > object->max_location_) checkpoint = object->max_location_; + checkpoint = AlignDown(checkpoint, object->alignment_); + return checkpoint; +} + +template +static T MaxCheckpoint() { + return std::numeric_limits::max(); +} + +template +static inline bool CheckCurrentPC(T pc, T checkpoint) { + VIXL_ASSERT(pc <= checkpoint); + // We must emit the pools if we are at the checkpoint now. + return pc == checkpoint; +} + +template +static inline bool CheckFuturePC(T pc, T checkpoint) { + // We do not need to emit the pools now if the projected future PC will be + // equal to the checkpoint (we will need to emit the pools then). + return pc > checkpoint; +} + +template +bool PoolManager::MustEmit(T pc, + int num_bytes, + ForwardReference* reference, + LocationBase* label_base) const { + // Check if we are at or past the checkpoint. + if (CheckCurrentPC(pc, checkpoint_)) return true; + + // Check if the future PC will be past the checkpoint. + pc += num_bytes; + if (CheckFuturePC(pc, checkpoint_)) return true; + + // No new reference - nothing to do. + if (reference == NULL) { + VIXL_ASSERT(label_base == NULL); + return false; + } + + if (objects_.empty()) { + // Basic assertions that restrictions on the new (and only) reference are + // possible to satisfy. + VIXL_ASSERT(AlignUp(pc + header_size_, alignment_) >= + reference->min_object_location_); + VIXL_ASSERT(pc <= reference->max_object_location_); + return false; + } + + // Check if the object is already being tracked. 
+ const PoolObject* existing_object = GetObjectIfTracked(label_base); + if (existing_object != NULL) { + // If the existing_object is already in existing_objects_ and its new + // alignment and new location restrictions are not stricter, skip the more + // expensive check. + if ((reference->min_object_location_ <= existing_object->min_location_) && + (reference->max_object_location_ >= existing_object->max_location_) && + (reference->object_alignment_ <= existing_object->alignment_)) { + return false; + } + } + + // Create a temporary object. + PoolObject temp(label_base); + temp.RestrictRange(reference->min_object_location_, + reference->max_object_location_); + temp.RestrictAlignment(reference->object_alignment_); + if (existing_object != NULL) { + temp.RestrictRange(existing_object->min_location_, + existing_object->max_location_); + temp.RestrictAlignment(existing_object->alignment_); + } + + // Check if the new reference can be added after the end of the current pool. + // If yes, we don't need to emit. + T last_reachable = AlignDown(temp.max_location_, temp.alignment_); + const PoolObject& last = objects_.back(); + T after_pool = AlignDown(last.max_location_, last.alignment_) + + last.label_base_->GetPoolObjectSizeInBytes(); + // The current object can be placed at the end of the pool, even if the last + // object is placed at the last possible location. + if (last_reachable >= after_pool) return false; + // The current object can be placed after the code we are about to emit and + // after the existing pool (with a pessimistic size estimate). + if (last_reachable >= pc + num_bytes + max_pool_size_) return false; + + // We're not in a trivial case, so we need to recalculate the checkpoint. + + // Check (conservatively) if we can fit it into the objects_ array, without + // breaking our assumptions. 
Here we want to recalculate the checkpoint as + // if the new reference was added to the PoolManager but without actually + // adding it (as removing it is non-trivial). + + T checkpoint = MaxCheckpoint(); + // Will temp be the last object in objects_? + if (PoolObjectLessThan(last, temp)) { + checkpoint = UpdateCheckpointForObject(checkpoint, &temp); + if (checkpoint < temp.min_location_) return true; + } + + bool tempNotPlacedYet = true; + for (int i = static_cast(objects_.size()) - 1; i >= 0; --i) { + const PoolObject& current = objects_[i]; + if (tempNotPlacedYet && PoolObjectLessThan(current, temp)) { + checkpoint = UpdateCheckpointForObject(checkpoint, &temp); + if (checkpoint < temp.min_location_) return true; + if (CheckFuturePC(pc, checkpoint)) return true; + tempNotPlacedYet = false; + } + if (current.label_base_ == label_base) continue; + checkpoint = UpdateCheckpointForObject(checkpoint, ¤t); + if (checkpoint < current.min_location_) return true; + if (CheckFuturePC(pc, checkpoint)) return true; + } + // temp is the object with the smallest max_location_. + if (tempNotPlacedYet) { + checkpoint = UpdateCheckpointForObject(checkpoint, &temp); + if (checkpoint < temp.min_location_) return true; + } + + // Take the header into account. + checkpoint -= header_size_; + checkpoint = AlignDown(checkpoint, alignment_); + + return CheckFuturePC(pc, checkpoint); +} + +template +void PoolManager::RecalculateCheckpoint(SortOption sort_option) { + // TODO: Improve the max_pool_size_ estimate by starting from the + // min_location_ of the first object, calculating the end of the pool as if + // all objects were placed starting from there, and in the end adding the + // maximum object alignment found minus one (which is the maximum extra + // padding we would need if we were to relocate the pool to a different + // address). + max_pool_size_ = 0; + + if (objects_.empty()) { + checkpoint_ = MaxCheckpoint(); + return; + } + + // Sort objects by their max_location_. 
+ if (sort_option == kSortRequired) { + std::sort(objects_.begin(), objects_.end(), PoolObjectLessThan); + } + + // Add the header size and header and footer max alignment to the maximum + // pool size. + max_pool_size_ += header_size_ + 2 * (alignment_ - 1); + + T checkpoint = MaxCheckpoint(); + int last_object_index = static_cast(objects_.size()) - 1; + for (int i = last_object_index; i >= 0; --i) { + // Bring back the checkpoint by the size of the current object, unless + // we need to bring it back more, then align. + PoolObject& current = objects_[i]; + checkpoint = UpdateCheckpointForObject(checkpoint, ¤t); + VIXL_ASSERT(checkpoint >= current.min_location_); + max_pool_size_ += (current.alignment_ - 1 + + current.label_base_->GetPoolObjectSizeInBytes()); + } + // Take the header into account. + checkpoint -= header_size_; + checkpoint = AlignDown(checkpoint, alignment_); + + // Update the checkpoint of the pool manager. + checkpoint_ = checkpoint; + + // NOTE: To handle min_location_ in the generic case, we could make a second + // pass of the objects_ vector, increasing the checkpoint as needed, while + // maintaining the alignment requirements. + // It should not be possible to have any issues with min_location_ with actual + // code, since there should always be some kind of branch over the pool, + // whether introduced by the pool emission or by the user, which will make + // sure the min_location_ requirement is satisfied. It's possible that the + // user could emit code in the literal pool and intentionally load the first + // value and then fall-through into the pool, but that is not a supported use + // of VIXL and we will assert in that case. 
+} + +template +bool PoolManager::PoolObjectLessThan(const PoolObject& a, + const PoolObject& b) { + if (a.max_location_ != b.max_location_) + return (a.max_location_ < b.max_location_); + int a_size = a.label_base_->GetPoolObjectSizeInBytes(); + int b_size = b.label_base_->GetPoolObjectSizeInBytes(); + if (a_size != b_size) return (a_size < b_size); + if (a.alignment_ != b.alignment_) return (a.alignment_ < b.alignment_); + if (a.min_location_ != b.min_location_) + return (a.min_location_ < b.min_location_); + return false; +} + +template +void PoolManager::AddObjectReference(const ForwardReference* reference, + LocationBase* label_base) { + VIXL_ASSERT(reference->object_alignment_ <= buffer_alignment_); + VIXL_ASSERT(label_base->GetPoolObjectAlignment() <= buffer_alignment_); + + PoolObject* object = GetObjectIfTracked(label_base); + + if (object == NULL) { + PoolObject new_object(label_base); + new_object.RestrictRange(reference->min_object_location_, + reference->max_object_location_); + new_object.RestrictAlignment(reference->object_alignment_); + Insert(new_object); + } else { + object->RestrictRange(reference->min_object_location_, + reference->max_object_location_); + object->RestrictAlignment(reference->object_alignment_); + + // Move the object, if needed. + if (objects_.size() != 1) { + PoolObject new_object(*object); + ptrdiff_t distance = std::distance(objects_.data(), object); + objects_.erase(objects_.begin() + distance); + Insert(new_object); + } + } + // No need to sort, we inserted the object in an already sorted array. + RecalculateCheckpoint(kNoSortRequired); +} + +template +void PoolManager::Insert(const PoolObject& new_object) { + bool inserted = false; + // Place the object in the right position. 
+ for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) { + PoolObject& current = *iter; + if (!PoolObjectLessThan(current, new_object)) { + objects_.insert(iter, new_object); + inserted = true; + break; + } + } + if (!inserted) { + objects_.push_back(new_object); + } +} + +template +void PoolManager::RemoveAndDelete(PoolObject* object) { + for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) { + PoolObject& current = *iter; + if (current.label_base_ == object->label_base_) { + (void)RemoveAndDelete(iter); + return; + } + } + VIXL_UNREACHABLE(); +} + +template +typename PoolManager::objects_iter PoolManager::RemoveAndDelete( + objects_iter iter) { + PoolObject& object = *iter; + LocationBase* label_base = object.label_base_; + + // Check if we also need to delete the LocationBase object. + if (label_base->ShouldBeDeletedOnPoolManagerDestruction()) { + delete_on_destruction_.push_back(label_base); + } + if (label_base->ShouldBeDeletedOnPlacementByPoolManager()) { + VIXL_ASSERT(!label_base->ShouldBeDeletedOnPoolManagerDestruction()); + delete label_base; + } + + return objects_.erase(iter); +} + +template +T PoolManager::Bind(MacroAssemblerInterface* masm, + LocationBase* object, + T location) { + PoolObject* existing_object = GetObjectIfTracked(object); + int alignment; + T min_location; + if (existing_object == NULL) { + alignment = object->GetMaxAlignment(); + min_location = object->GetMinLocation(); + } else { + alignment = existing_object->alignment_; + min_location = existing_object->min_location_; + } + + // Align if needed, and add necessary padding to reach the min_location_. 
+ T aligned_location = AlignUp(location, alignment); + masm->EmitNopBytes(aligned_location - location); + location = aligned_location; + while (location < min_location) { + masm->EmitNopBytes(alignment); + location += alignment; + } + + object->SetLocation(masm->AsAssemblerBase(), location); + object->MarkBound(); + + if (existing_object != NULL) { + RemoveAndDelete(existing_object); + // No need to sort, we removed the object from a sorted array. + RecalculateCheckpoint(kNoSortRequired); + } + + // We assume that the maximum padding we can possibly add here is less + // than the header alignment - hence that we're not going to go past our + // checkpoint. + VIXL_ASSERT(!CheckFuturePC(location, checkpoint_)); + return location; +} + +template +void PoolManager::Release(T pc) { + USE(pc); + if (--monitor_ == 0) { + // Ensure the pool has not been blocked for too long. + VIXL_ASSERT(pc <= checkpoint_); + } +} + +template +PoolManager::~PoolManager() { +#ifdef VIXL_DEBUG + // Check for unbound objects. + for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) { + // There should not be any bound objects left in the pool. For unbound + // objects, we will check in the destructor of the object itself. + VIXL_ASSERT(!(*iter).label_base_->IsBound()); + } +#endif + // Delete objects the pool manager owns. + for (typename std::vector *>::iterator + iter = delete_on_destruction_.begin(), + end = delete_on_destruction_.end(); + iter != end; + ++iter) { + delete *iter; + } +} + +template +int PoolManager::GetPoolSizeForTest() const { + // Iterate over objects and return their cumulative size. This does not take + // any padding into account, just the size of the objects themselves. 
+ int size = 0; + for (const_objects_iter iter = objects_.begin(); iter != objects_.end(); + ++iter) { + size += (*iter).label_base_->GetPoolObjectSizeInBytes(); + } + return size; +} +} + +#endif // VIXL_POOL_MANAGER_IMPL_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/pool-manager.h b/module/src/main/cpp/whale/src/assembler/vixl/pool-manager.h new file mode 100644 index 00000000..b5cb867b --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/pool-manager.h @@ -0,0 +1,555 @@ +// Copyright 2017, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_POOL_MANAGER_H_ +#define VIXL_POOL_MANAGER_H_ + +#include + +#include +#include +#include +#include + +#include "globals-vixl.h" +#include "macro-assembler-interface.h" +#include "utils-vixl.h" + +namespace vixl { + +class TestPoolManager; + +// There are four classes declared in this header file: +// PoolManager, PoolObject, ForwardReference and LocationBase. + +// The PoolManager manages both literal and veneer pools, and is designed to be +// shared between AArch32 and AArch64. A pool is represented as an abstract +// collection of references to objects. The manager does not need to know +// architecture-specific details about literals and veneers; the actual +// emission of the pool objects is delegated. +// +// Literal and Label will derive from LocationBase. The MacroAssembler will +// create these objects as instructions that reference pool objects are +// encountered, and ask the PoolManager to track them. The PoolManager will +// create an internal PoolObject object for each object derived from +// LocationBase. Some of these PoolObject objects will be deleted when placed +// (e.g. the ones corresponding to Literals), whereas others will be updated +// with a new range when placed (e.g. Veneers) and deleted when Bind() is +// called on the PoolManager with their corresponding object as a parameter. 
+// +// A ForwardReference represents a reference to a PoolObject that will be +// placed later in the instruction stream. Each ForwardReference may only refer +// to one PoolObject, but many ForwardReferences may refer to the same +// object. +// +// A PoolObject represents an object that has not yet been placed. The final +// location of a PoolObject (and hence the LocationBase object to which it +// corresponds) is constrained mostly by the instructions that refer to it, but +// PoolObjects can also have inherent constraints, such as alignment. +// +// LocationBase objects, unlike PoolObject objects, can be used outside of the +// pool manager (e.g. as manually placed literals, which may still have +// forward references that need to be resolved). +// +// At the moment, each LocationBase will have at most one PoolObject that keeps +// the relevant information for placing this object in the pool. When that +// object is placed, all forward references of the object are resolved. For +// that reason, we do not need to keep track of the ForwardReference objects in +// the PoolObject. + +// T is an integral type used for representing locations. For a 32-bit +// architecture it will typically be int32_t, whereas for a 64-bit +// architecture it will be int64_t. +template +class ForwardReference; +template +class PoolObject; +template +class PoolManager; + +// Represents an object that has a size and alignment, and either has a known +// location or has not been placed yet. An object of a subclass of LocationBase +// will typically keep track of a number of ForwardReferences when it has not +// yet been placed, but LocationBase does not assume or implement that +// functionality. LocationBase provides virtual methods for emitting the +// object, updating all the forward references, and giving the PoolManager +// information on the lifetime of this object and the corresponding PoolObject. 
+template +class LocationBase { + public: + // The size of a LocationBase object is restricted to 4KB, in order to avoid + // situations where the size of the pool becomes larger than the range of + // an unconditional branch. This cannot happen without having large objects, + // as typically the range of an unconditional branch is the larger range + // an instruction supports. + // TODO: This would ideally be an architecture-specific value, perhaps + // another template parameter. + static const int kMaxObjectSize = 4 * KBytes; + + // By default, LocationBase objects are aligned naturally to their size. + LocationBase(uint32_t type, int size) + : pool_object_size_(size), + pool_object_alignment_(size), + pool_object_type_(type), + is_bound_(false), + location_(0) { + VIXL_ASSERT(size > 0); + VIXL_ASSERT(size <= kMaxObjectSize); + VIXL_ASSERT(IsPowerOf2(size)); + } + + // Allow alignment to be specified, as long as it is smaller than the size. + LocationBase(uint32_t type, int size, int alignment) + : pool_object_size_(size), + pool_object_alignment_(alignment), + pool_object_type_(type), + is_bound_(false), + location_(0) { + VIXL_ASSERT(size > 0); + VIXL_ASSERT(size <= kMaxObjectSize); + VIXL_ASSERT(IsPowerOf2(alignment)); + VIXL_ASSERT(alignment <= size); + } + + // Constructor for locations that are already bound. + explicit LocationBase(T location) + : pool_object_size_(-1), + pool_object_alignment_(-1), + pool_object_type_(0), + is_bound_(true), + location_(location) {} + + virtual ~LocationBase() {} + + // The PoolManager should assume ownership of some objects, and delete them + // after they have been placed. This can happen for example for literals that + // are created internally to the MacroAssembler and the user doesn't get a + // handle to. By default, the PoolManager will not do this. 
+ virtual bool ShouldBeDeletedOnPlacementByPoolManager() const { return false; } + // The PoolManager should assume ownership of some objects, and delete them + // when it is destroyed. By default, the PoolManager will not do this. + virtual bool ShouldBeDeletedOnPoolManagerDestruction() const { return false; } + + // Emit the PoolObject. Derived classes will implement this method to emit + // the necessary data and/or code (for example, to emit a literal or a + // veneer). This should not add padding, as it is added explicitly by the pool + // manager. + virtual void EmitPoolObject(MacroAssemblerInterface* masm) = 0; + + // Resolve the references to this object. Will encode the necessary offset + // in the instruction corresponding to each reference and then delete it. + // TODO: An alternative here would be to provide a ResolveReference() + // method that only asks the LocationBase to resolve a specific reference + // (thus allowing the pool manager to resolve some of the references only). + // This would mean we need to have some kind of API to get all the references + // to a LabelObject. + virtual void ResolveReferences(internal::AssemblerBase* assembler) = 0; + + // Returns true when the PoolObject corresponding to this LocationBase object + // needs to be removed from the pool once placed, and false if it needs to + // be updated instead (in which case UpdatePoolObject will be called). + virtual bool ShouldDeletePoolObjectOnPlacement() const { return true; } + + // Update the PoolObject after placing it, if necessary. This will happen for + // example in the case of a placed veneer, where we need to use a new updated + // range and a new reference (from the newly added branch instruction). + // By default, this does nothing, to avoid forcing objects that will not need + // this to have an empty implementation. + virtual void UpdatePoolObject(PoolObject*) {} + + // Implement heuristics for emitting this object. 
If a margin is to be used + // as a hint during pool emission, we will try not to emit the object if we + // are further away from the maximum reachable location by more than the + // margin. + virtual bool UsePoolObjectEmissionMargin() const { return false; } + virtual T GetPoolObjectEmissionMargin() const { + VIXL_ASSERT(UsePoolObjectEmissionMargin() == false); + return 0; + } + + int GetPoolObjectSizeInBytes() const { return pool_object_size_; } + int GetPoolObjectAlignment() const { return pool_object_alignment_; } + uint32_t GetPoolObjectType() const { return pool_object_type_; } + + bool IsBound() const { return is_bound_; } + T GetLocation() const { return location_; } + + // This function can be called multiple times before the object is marked as + // bound with MarkBound() below. This is because some objects (e.g. the ones + // used to represent labels) can have veneers; every time we place a veneer + // we need to keep track of the location in order to resolve the references + // to the object. Reusing the location_ field for this is convenient. + void SetLocation(internal::AssemblerBase* assembler, T location) { + VIXL_ASSERT(!is_bound_); + location_ = location; + ResolveReferences(assembler); + } + + void MarkBound() { + VIXL_ASSERT(!is_bound_); + is_bound_ = true; + } + + // The following two functions are used when an object is bound by a call to + // PoolManager::Bind(). + virtual int GetMaxAlignment() const { + VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement()); + return 1; + } + virtual T GetMinLocation() const { + VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement()); + return 0; + } + + private: + // The size of the corresponding PoolObject, in bytes. + int pool_object_size_; + // The alignment of the corresponding PoolObject; this must be a power of two. + int pool_object_alignment_; + + // Different derived classes should have different type values. This can be + // used internally by the PoolManager for grouping of objects. 
+ uint32_t pool_object_type_; + // Has the object been bound to a location yet? + bool is_bound_; + + protected: + // See comment on SetLocation() for the use of this field. + T location_; +}; + +template +class PoolObject { + public: + // By default, PoolObjects have no inherent position constraints. + explicit PoolObject(LocationBase* parent) + : label_base_(parent), + min_location_(0), + max_location_(std::numeric_limits::max()), + alignment_(parent->GetPoolObjectAlignment()), + skip_until_location_hint_(0), + type_(parent->GetPoolObjectType()) { + VIXL_ASSERT(IsPowerOf2(alignment_)); + UpdateLocationHint(); + } + + // Reset the minimum and maximum location and the alignment of the object. + // This function is public in order to allow the LocationBase corresponding to + // this PoolObject to update the PoolObject when placed, e.g. in the case of + // veneers. The size and type of the object cannot be modified. + void Update(T min, T max, int alignment) { + // We don't use RestrictRange here as the new range is independent of the + // old range (and the maximum location is typically larger). + min_location_ = min; + max_location_ = max; + RestrictAlignment(alignment); + UpdateLocationHint(); + } + + private: + void RestrictRange(T min, T max) { + VIXL_ASSERT(min <= max_location_); + VIXL_ASSERT(max >= min_location_); + min_location_ = std::max(min_location_, min); + max_location_ = std::min(max_location_, max); + UpdateLocationHint(); + } + + void RestrictAlignment(int alignment) { + VIXL_ASSERT(IsPowerOf2(alignment)); + VIXL_ASSERT(IsPowerOf2(alignment_)); + alignment_ = std::max(alignment_, alignment); + } + + void UpdateLocationHint() { + if (label_base_->UsePoolObjectEmissionMargin()) { + skip_until_location_hint_ = + max_location_ - label_base_->GetPoolObjectEmissionMargin(); + } + } + + // The LocationBase that this pool object represents. + LocationBase* label_base_; + + // Hard, precise location constraints for the start location of the object. 
+ // They are both inclusive, that is the start location of the object can be + // at any location between min_location_ and max_location_, themselves + // included. + T min_location_; + T max_location_; + + // The alignment must be a power of two. + int alignment_; + + // Avoid generating this object until skip_until_location_hint_. This + // supports cases where placing the object in the pool has an inherent cost + // that could be avoided in some other way. Veneers are a typical example; we + // would prefer to branch directly (over a pool) rather than use veneers, so + // this value can be set using some heuristic to leave them in the pool. + // This value is only a hint, which will be ignored if it has to in order to + // meet the hard constraints we have. + T skip_until_location_hint_; + + // Used only to group objects of similar type together. The PoolManager does + // not know what the types represent. + uint32_t type_; + + friend class PoolManager; +}; + +// Class that represents a forward reference. It is the responsibility of +// LocationBase objects to keep track of forward references and patch them when +// an object is placed - this class is only used by the PoolManager in order to +// restrict the requirements on PoolObjects it is tracking. 
+template +class ForwardReference { + public: + ForwardReference(T location, + int size, + T min_object_location, + T max_object_location, + int object_alignment = 1) + : location_(location), + size_(size), + object_alignment_(object_alignment), + min_object_location_(min_object_location), + max_object_location_(max_object_location) { + VIXL_ASSERT(AlignDown(max_object_location, object_alignment) >= + min_object_location); + } + + bool LocationIsEncodable(T location) const { + return location >= min_object_location_ && + location <= max_object_location_ && + IsAligned(location, object_alignment_); + } + + T GetLocation() const { return location_; } + T GetMinLocation() const { return min_object_location_; } + T GetMaxLocation() const { return max_object_location_; } + int GetAlignment() const { return object_alignment_; } + + // Needed for InvalSet. + void SetLocationToInvalidateOnly(T location) { location_ = location; } + + private: + // The location of the thing that contains the reference. For example, this + // can be the location of the branch or load instruction. + T location_; + + // The size of the instruction that makes the reference, in bytes. + int size_; + + // The alignment that the object must satisfy for this reference - must be a + // power of two. + int object_alignment_; + + // Specify the possible locations where the object could be stored. AArch32's + // PC offset, and T32's PC alignment calculations should be applied by the + // Assembler, not here. The PoolManager deals only with simple locationes. + // Including min_object_adddress_ is necessary to handle AArch32 some + // instructions which have a minimum offset of 0, but also have the implicit + // PC offset. + // Note that this structure cannot handle sparse ranges, such as A32's ADR, + // but doing so is costly and probably not useful in practice. The min and + // and max object location both refer to the beginning of the object, are + // inclusive and are not affected by the object size. 
E.g. if + // max_object_location_ is equal to X, we can place the object at location X + // regardless of its size. + T min_object_location_; + T max_object_location_; + + friend class PoolManager; +}; + + +template +class PoolManager { + public: + PoolManager(int header_size, int alignment, int buffer_alignment) + : header_size_(header_size), + alignment_(alignment), + buffer_alignment_(buffer_alignment), + checkpoint_(std::numeric_limits::max()), + max_pool_size_(0), + monitor_(0) {} + + ~PoolManager(); + + // Check if we will need to emit the pool at location 'pc', when planning to + // generate a certain number of bytes. This optionally takes a + // ForwardReference we are about to generate, in which case the size of the + // reference must be included in 'num_bytes'. + bool MustEmit(T pc, + int num_bytes = 0, + ForwardReference* reference = NULL, + LocationBase* object = NULL) const; + + enum EmitOption { kBranchRequired, kNoBranchRequired }; + + // Emit the pool at location 'pc', using 'masm' as the macroassembler. + // The branch over the header can be optionally omitted using 'option'. + // Returns the new PC after pool emission. + // This expects a number of bytes that are about to be emitted, to be taken + // into account in heuristics for pool object emission. + // This also optionally takes a forward reference and an object as + // parameters, to be used in the case where emission of the pool is triggered + // by adding a new reference to the pool that does not fit. The pool manager + // will need this information in order to apply its heuristics correctly. + T Emit(MacroAssemblerInterface* masm, + T pc, + int num_bytes = 0, + ForwardReference* new_reference = NULL, + LocationBase* new_object = NULL, + EmitOption option = kBranchRequired); + + // Add 'reference' to 'object'. Should not be preceded by a call to MustEmit() + // that returned true, unless Emit() has been successfully afterwards. 
+ void AddObjectReference(const ForwardReference* reference, + LocationBase* object); + + // This is to notify the pool that a LocationBase has been bound to a location + // and does not need to be tracked anymore. + // This will happen, for example, for Labels, which are manually bound by the + // user. + // This can potentially add some padding bytes in order to meet the object + // requirements, and will return the new location. + T Bind(MacroAssemblerInterface* masm, LocationBase* object, T location); + + // Functions for blocking and releasing the pools. + void Block() { monitor_++; } + void Release(T pc); + bool IsBlocked() const { return monitor_ != 0; } + + private: + typedef typename std::vector >::iterator objects_iter; + typedef + typename std::vector >::const_iterator const_objects_iter; + + PoolObject* GetObjectIfTracked(LocationBase* label) { + return const_cast*>( + static_cast*>(this)->GetObjectIfTracked(label)); + } + + const PoolObject* GetObjectIfTracked(LocationBase* label) const { + for (const_objects_iter iter = objects_.begin(); iter != objects_.end(); + ++iter) { + const PoolObject& current = *iter; + if (current.label_base_ == label) return ¤t; + } + return NULL; + } + + // Helper function for calculating the checkpoint. + enum SortOption { kSortRequired, kNoSortRequired }; + void RecalculateCheckpoint(SortOption sort_option = kSortRequired); + + // Comparison function for using std::sort() on objects_. PoolObject A is + // ordered before PoolObject B when A should be emitted before B. The + // comparison depends on the max_location_, size_, alignment_ and + // min_location_. + static bool PoolObjectLessThan(const PoolObject& a, + const PoolObject& b); + + // Helper function used in the checkpoint calculation. 'checkpoint' is the + // current checkpoint, which is modified to take 'object' into account. The + // new checkpoint is returned. 
+ static T UpdateCheckpointForObject(T checkpoint, const PoolObject* object); + + // Helper function to add a new object into a sorted objects_ array. + void Insert(const PoolObject& new_object); + + // Helper functions to remove an object from objects_ and delete the + // corresponding LocationBase object, if necessary. This will be called + // either after placing the object, or when Bind() is called. + void RemoveAndDelete(PoolObject* object); + objects_iter RemoveAndDelete(objects_iter iter); + + // Helper function to check if we should skip emitting an object. + bool ShouldSkipObject(PoolObject* pool_object, + T pc, + int num_bytes, + ForwardReference* new_reference, + LocationBase* new_object, + PoolObject* existing_object) const; + + // Used only for debugging. + void DumpCurrentState(T pc) const; + + // Methods used for testing only, via the test friend classes. + bool PoolIsEmptyForTest() const { return objects_.empty(); } + T GetCheckpointForTest() const { return checkpoint_; } + int GetPoolSizeForTest() const; + + // The objects we are tracking references to. The objects_ vector is sorted + // at all times between calls to the public members of the PoolManager. It + // is sorted every time we add, delete or update a PoolObject. + // TODO: Consider a more efficient data structure here, to allow us to delete + // elements as we emit them. + std::vector > objects_; + + // Objects to be deleted on pool destruction. + std::vector*> delete_on_destruction_; + + // The header_size_ and alignment_ values are hardcoded for each instance of + // PoolManager. The PoolManager does not know how to emit the header, and + // relies on the EmitPoolHeader and EndPool methods of the + // MacroAssemblerInterface for that. It will also emit padding if necessary, + // both for the header and at the end of the pool, according to alignment_, + // and using the EmitNopBytes and EmitPaddingBytes method of the + // MacroAssemblerInterface. + + // The size of the header, in bytes. 
+ int header_size_; + // The alignment of the header - must be a power of two. + int alignment_; + // The alignment of the buffer - we cannot guarantee any object alignment + // larger than this alignment. When a buffer is grown, this alignment has + // to be guaranteed. + // TODO: Consider extending this to describe the guaranteed alignment as the + // modulo of a known number. + int buffer_alignment_; + + // The current checkpoint. This is the latest location at which the pool + // *must* be emitted. This should not be visible outside the pool manager + // and should only be updated in RecalculateCheckpoint. + T checkpoint_; + + // Maximum size of the pool, assuming we need the maximum possible padding + // for each object and for the header. It is only updated in + // RecalculateCheckpoint. + T max_pool_size_; + + // Indicates whether the emission of this pool is blocked. + int monitor_; + + friend class vixl::TestPoolManager; +}; + + +} // namespace vixl + +#endif // VIXL_POOL_MANAGER_H_ diff --git a/module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.cc b/module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.cc new file mode 100644 index 00000000..41b55868 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.cc @@ -0,0 +1,555 @@ +// Copyright 2015, VIXL authors +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +#include "utils-vixl.h" + +namespace vixl { + +// The default NaN values (for FPCR.DN=1). +const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000)); +const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000); +const Float16 kFP16DefaultNaN = RawbitsToFloat16(0x7e00); + +// Floating-point zero values. +const Float16 kFP16PositiveZero = RawbitsToFloat16(0x0); +const Float16 kFP16NegativeZero = RawbitsToFloat16(0x8000); + +// Floating-point infinity values. 
+const Float16 kFP16PositiveInfinity = RawbitsToFloat16(0x7c00); +const Float16 kFP16NegativeInfinity = RawbitsToFloat16(0xfc00); +const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000); +const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000); +const double kFP64PositiveInfinity = + RawbitsToDouble(UINT64_C(0x7ff0000000000000)); +const double kFP64NegativeInfinity = + RawbitsToDouble(UINT64_C(0xfff0000000000000)); + +bool IsZero(Float16 value) { + uint16_t bits = Float16ToRawbits(value); + return (bits == Float16ToRawbits(kFP16PositiveZero) || + bits == Float16ToRawbits(kFP16NegativeZero)); +} + +uint16_t Float16ToRawbits(Float16 value) { return value.rawbits_; } + +uint32_t FloatToRawbits(float value) { + uint32_t bits = 0; + memcpy(&bits, &value, 4); + return bits; +} + + +uint64_t DoubleToRawbits(double value) { + uint64_t bits = 0; + memcpy(&bits, &value, 8); + return bits; +} + + +Float16 RawbitsToFloat16(uint16_t bits) { + Float16 f; + f.rawbits_ = bits; + return f; +} + + +float RawbitsToFloat(uint32_t bits) { + float value = 0.0; + memcpy(&value, &bits, 4); + return value; +} + + +double RawbitsToDouble(uint64_t bits) { + double value = 0.0; + memcpy(&value, &bits, 8); + return value; +} + + +uint32_t Float16Sign(internal::SimFloat16 val) { + uint16_t rawbits = Float16ToRawbits(val); + return ExtractUnsignedBitfield32(15, 15, rawbits); +} + + +uint32_t Float16Exp(internal::SimFloat16 val) { + uint16_t rawbits = Float16ToRawbits(val); + return ExtractUnsignedBitfield32(14, 10, rawbits); +} + +uint32_t Float16Mantissa(internal::SimFloat16 val) { + uint16_t rawbits = Float16ToRawbits(val); + return ExtractUnsignedBitfield32(9, 0, rawbits); +} + + +uint32_t FloatSign(float val) { + uint32_t rawbits = FloatToRawbits(val); + return ExtractUnsignedBitfield32(31, 31, rawbits); +} + + +uint32_t FloatExp(float val) { + uint32_t rawbits = FloatToRawbits(val); + return ExtractUnsignedBitfield32(30, 23, rawbits); +} + + +uint32_t FloatMantissa(float val) 
{ + uint32_t rawbits = FloatToRawbits(val); + return ExtractUnsignedBitfield32(22, 0, rawbits); +} + + +uint32_t DoubleSign(double val) { + uint64_t rawbits = DoubleToRawbits(val); + return static_cast(ExtractUnsignedBitfield64(63, 63, rawbits)); +} + + +uint32_t DoubleExp(double val) { + uint64_t rawbits = DoubleToRawbits(val); + return static_cast(ExtractUnsignedBitfield64(62, 52, rawbits)); +} + + +uint64_t DoubleMantissa(double val) { + uint64_t rawbits = DoubleToRawbits(val); + return ExtractUnsignedBitfield64(51, 0, rawbits); +} + + +internal::SimFloat16 Float16Pack(uint16_t sign, + uint16_t exp, + uint16_t mantissa) { + uint16_t bits = (sign << 15) | (exp << 10) | mantissa; + return RawbitsToFloat16(bits); +} + + +float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa) { + uint32_t bits = (sign << 31) | (exp << 23) | mantissa; + return RawbitsToFloat(bits); +} + + +double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa) { + uint64_t bits = (sign << 63) | (exp << 52) | mantissa; + return RawbitsToDouble(bits); +} + + +int Float16Classify(Float16 value) { + uint16_t bits = Float16ToRawbits(value); + uint16_t exponent_max = (1 << 5) - 1; + uint16_t exponent_mask = exponent_max << 10; + uint16_t mantissa_mask = (1 << 10) - 1; + + uint16_t exponent = (bits & exponent_mask) >> 10; + uint16_t mantissa = bits & mantissa_mask; + if (exponent == 0) { + if (mantissa == 0) { + return FP_ZERO; + } + return FP_SUBNORMAL; + } else if (exponent == exponent_max) { + if (mantissa == 0) { + return FP_INFINITE; + } + return FP_NAN; + } + return FP_NORMAL; +} + + +unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) { + VIXL_ASSERT((reg_size % 8) == 0); + int count = 0; + for (unsigned i = 0; i < (reg_size / 16); i++) { + if ((imm & 0xffff) == 0) { + count++; + } + imm >>= 16; + } + return count; +} + + +int BitCount(uint64_t value) { return CountSetBits(value); } + +// Float16 definitions. 
+ +Float16::Float16(double dvalue) { + rawbits_ = + Float16ToRawbits(FPToFloat16(dvalue, FPTieEven, kIgnoreDefaultNaN)); +} + +namespace internal { + +SimFloat16 SimFloat16::operator-() const { + return RawbitsToFloat16(rawbits_ ^ 0x8000); +} + +// SimFloat16 definitions. +SimFloat16 SimFloat16::operator+(SimFloat16 rhs) const { + return static_cast(*this) + static_cast(rhs); +} + +SimFloat16 SimFloat16::operator-(SimFloat16 rhs) const { + return static_cast(*this) - static_cast(rhs); +} + +SimFloat16 SimFloat16::operator*(SimFloat16 rhs) const { + return static_cast(*this) * static_cast(rhs); +} + +SimFloat16 SimFloat16::operator/(SimFloat16 rhs) const { + return static_cast(*this) / static_cast(rhs); +} + +bool SimFloat16::operator<(SimFloat16 rhs) const { + return static_cast(*this) < static_cast(rhs); +} + +bool SimFloat16::operator>(SimFloat16 rhs) const { + return static_cast(*this) > static_cast(rhs); +} + +bool SimFloat16::operator==(SimFloat16 rhs) const { + if (IsNaN(*this) || IsNaN(rhs)) { + return false; + } else if (IsZero(rhs) && IsZero(*this)) { + // +0 and -0 should be treated as equal. + return true; + } + return this->rawbits_ == rhs.rawbits_; +} + +bool SimFloat16::operator!=(SimFloat16 rhs) const { return !(*this == rhs); } + +bool SimFloat16::operator==(double rhs) const { + return static_cast(*this) == static_cast(rhs); +} + +SimFloat16::operator double() const { + return FPToDouble(*this, kIgnoreDefaultNaN); +} + +Int64 BitCount(Uint32 value) { return CountSetBits(value.Get()); } + +} // namespace internal + +float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception) { + uint16_t bits = Float16ToRawbits(value); + uint32_t sign = bits >> 15; + uint32_t exponent = + ExtractUnsignedBitfield32(kFloat16MantissaBits + kFloat16ExponentBits - 1, + kFloat16MantissaBits, + bits); + uint32_t mantissa = + ExtractUnsignedBitfield32(kFloat16MantissaBits - 1, 0, bits); + + switch (Float16Classify(value)) { + case FP_ZERO: + return (sign == 0) ? 
0.0f : -0.0f; + + case FP_INFINITE: + return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity; + + case FP_SUBNORMAL: { + // Calculate shift required to put mantissa into the most-significant bits + // of the destination mantissa. + int shift = CountLeadingZeros(mantissa << (32 - 10)); + + // Shift mantissa and discard implicit '1'. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1; + mantissa &= (1 << kFloatMantissaBits) - 1; + + // Adjust the exponent for the shift applied, and rebias. + exponent = exponent - shift + (-15 + 127); + break; + } + + case FP_NAN: + if (IsSignallingNaN(value)) { + if (exception != NULL) { + *exception = true; + } + } + if (DN == kUseDefaultNaN) return kFP32DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred entirely, except that the top + // bit is forced to '1', making the result a quiet NaN. The unused + // (low-order) payload bits are set to 0. + exponent = (1 << kFloatExponentBits) - 1; + + // Increase bits in mantissa, making low-order bits 0. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits); + mantissa |= 1 << 22; // Force a quiet NaN. + break; + + case FP_NORMAL: + // Increase bits in mantissa, making low-order bits 0. + mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits); + + // Change exponent bias. + exponent += (-15 + 127); + break; + + default: + VIXL_UNREACHABLE(); + } + return RawbitsToFloat((sign << 31) | (exponent << kFloatMantissaBits) | + mantissa); +} + + +float FPToFloat(double value, + FPRounding round_mode, + UseDefaultNaN DN, + bool* exception) { + // Only the FPTieEven rounding mode is implemented. 
+ VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd)); + USE(round_mode); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + if (exception != NULL) { + *exception = true; + } + } + if (DN == kUseDefaultNaN) return kFP32DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred as much as possible, except + // that the top bit is forced to '1', making the result a quiet NaN. + uint64_t raw = DoubleToRawbits(value); + + uint32_t sign = raw >> 63; + uint32_t exponent = (1 << 8) - 1; + uint32_t payload = + static_cast(ExtractUnsignedBitfield64(50, 52 - 23, raw)); + payload |= (1 << 22); // Force a quiet NaN. + + return RawbitsToFloat((sign << 31) | (exponent << 23) | payload); + } + + case FP_ZERO: + case FP_INFINITE: { + // In a C++ cast, any value representable in the target type will be + // unchanged. This is always the case for +/-0.0 and infinities. + return static_cast(value); + } + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert double-to-float as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + uint64_t raw = DoubleToRawbits(value); + // Extract the IEEE-754 double components. + uint32_t sign = raw >> 63; + // Extract the exponent and remove the IEEE-754 encoding bias. + int32_t exponent = + static_cast(ExtractUnsignedBitfield64(62, 52, raw)) - 1023; + // Extract the mantissa and add the implicit '1' bit. + uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw); + if (std::fpclassify(value) == FP_NORMAL) { + mantissa |= (UINT64_C(1) << 52); + } + return FPRoundToFloat(sign, exponent, mantissa, round_mode); + } + } + + VIXL_UNREACHABLE(); + return value; +} + +// TODO: We should consider implementing a full FPToDouble(Float16) +// conversion function (for performance reasons). 
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception) { + // We can rely on implicit float to double conversion here. + return FPToFloat(value, DN, exception); +} + + +double FPToDouble(float value, UseDefaultNaN DN, bool* exception) { + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + if (exception != NULL) { + *exception = true; + } + } + if (DN == kUseDefaultNaN) return kFP64DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred entirely, except that the top + // bit is forced to '1', making the result a quiet NaN. The unused + // (low-order) payload bits are set to 0. + uint32_t raw = FloatToRawbits(value); + + uint64_t sign = raw >> 31; + uint64_t exponent = (1 << 11) - 1; + uint64_t payload = ExtractUnsignedBitfield64(21, 0, raw); + payload <<= (52 - 23); // The unused low-order bits should be 0. + payload |= (UINT64_C(1) << 51); // Force a quiet NaN. + + return RawbitsToDouble((sign << 63) | (exponent << 52) | payload); + } + + case FP_ZERO: + case FP_NORMAL: + case FP_SUBNORMAL: + case FP_INFINITE: { + // All other inputs are preserved in a standard cast, because every value + // representable using an IEEE-754 float is also representable using an + // IEEE-754 double. + return static_cast(value); + } + } + + VIXL_UNREACHABLE(); + return static_cast(value); +} + + +Float16 FPToFloat16(float value, + FPRounding round_mode, + UseDefaultNaN DN, + bool* exception) { + // Only the FPTieEven rounding mode is implemented. 
+ VIXL_ASSERT(round_mode == FPTieEven); + USE(round_mode); + + uint32_t raw = FloatToRawbits(value); + int32_t sign = raw >> 31; + int32_t exponent = ExtractUnsignedBitfield32(30, 23, raw) - 127; + uint32_t mantissa = ExtractUnsignedBitfield32(22, 0, raw); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + if (exception != NULL) { + *exception = true; + } + } + if (DN == kUseDefaultNaN) return kFP16DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred as much as possible, except + // that the top bit is forced to '1', making the result a quiet NaN. + uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity) + : Float16ToRawbits(kFP16NegativeInfinity); + result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits); + result |= (1 << 9); // Force a quiet NaN; + return RawbitsToFloat16(result); + } + + case FP_ZERO: + return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero; + + case FP_INFINITE: + return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert float-to-half as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + + // Add the implicit '1' bit to the mantissa. + mantissa += (1 << 23); + return FPRoundToFloat16(sign, exponent, mantissa, round_mode); + } + } + + VIXL_UNREACHABLE(); + return kFP16PositiveZero; +} + + +Float16 FPToFloat16(double value, + FPRounding round_mode, + UseDefaultNaN DN, + bool* exception) { + // Only the FPTieEven rounding mode is implemented. 
+ VIXL_ASSERT(round_mode == FPTieEven); + USE(round_mode); + + uint64_t raw = DoubleToRawbits(value); + int32_t sign = raw >> 63; + int64_t exponent = ExtractUnsignedBitfield64(62, 52, raw) - 1023; + uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw); + + switch (std::fpclassify(value)) { + case FP_NAN: { + if (IsSignallingNaN(value)) { + if (exception != NULL) { + *exception = true; + } + } + if (DN == kUseDefaultNaN) return kFP16DefaultNaN; + + // Convert NaNs as the processor would: + // - The sign is propagated. + // - The payload (mantissa) is transferred as much as possible, except + // that the top bit is forced to '1', making the result a quiet NaN. + uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity) + : Float16ToRawbits(kFP16NegativeInfinity); + result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits); + result |= (1 << 9); // Force a quiet NaN; + return RawbitsToFloat16(result); + } + + case FP_ZERO: + return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero; + + case FP_INFINITE: + return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity; + case FP_NORMAL: + case FP_SUBNORMAL: { + // Convert double-to-half as the processor would, assuming that FPCR.FZ + // (flush-to-zero) is not set. + + // Add the implicit '1' bit to the mantissa. + mantissa += (UINT64_C(1) << 52); + return FPRoundToFloat16(sign, exponent, mantissa, round_mode); + } + } + + VIXL_UNREACHABLE(); + return kFP16PositiveZero; +} + +} // namespace vixl diff --git a/module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.h b/module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.h new file mode 100644 index 00000000..1c76fcb2 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/vixl/utils-vixl.h @@ -0,0 +1,1281 @@ +// Copyright 2015, VIXL authors +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of ARM Limited nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef VIXL_UTILS_H +#define VIXL_UTILS_H + +#include +#include +#include +#include + +#include "compiler-intrinsics-vixl.h" +#include "globals-vixl.h" + +namespace vixl { + +// Macros for compile-time format checking. 
+#if GCC_VERSION_OR_NEWER(4, 4, 0) +#define PRINTF_CHECK(format_index, varargs_index) \ + __attribute__((format(gnu_printf, format_index, varargs_index))) +#else +#define PRINTF_CHECK(format_index, varargs_index) +#endif + +#ifdef __GNUC__ +#define VIXL_HAS_DEPRECATED_WITH_MSG +#elif defined(__clang__) +#ifdef __has_extension(attribute_deprecated_with_message) +#define VIXL_HAS_DEPRECATED_WITH_MSG +#endif +#endif + +#ifdef VIXL_HAS_DEPRECATED_WITH_MSG +#define VIXL_DEPRECATED(replaced_by, declarator) \ + __attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator +#else +#define VIXL_DEPRECATED(replaced_by, declarator) declarator +#endif + +#ifdef VIXL_DEBUG +#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE() +#else +#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH() +#endif + +template +size_t ArrayLength(const T (&)[n]) { + return n; +} + +// Check number width. +// TODO: Refactor these using templates. +inline bool IsIntN(unsigned n, uint32_t x) { + VIXL_ASSERT((0 < n) && (n < 32)); + uint32_t limit = UINT32_C(1) << (n - 1); + return x < limit; +} +inline bool IsIntN(unsigned n, int32_t x) { + VIXL_ASSERT((0 < n) && (n < 32)); + int32_t limit = INT32_C(1) << (n - 1); + return (-limit <= x) && (x < limit); +} +inline bool IsIntN(unsigned n, uint64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + uint64_t limit = UINT64_C(1) << (n - 1); + return x < limit; +} +inline bool IsIntN(unsigned n, int64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + int64_t limit = INT64_C(1) << (n - 1); + return (-limit <= x) && (x < limit); +} +VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) { + return IsIntN(n, x); +} + +inline bool IsUintN(unsigned n, uint32_t x) { + VIXL_ASSERT((0 < n) && (n < 32)); + return !(x >> n); +} +inline bool IsUintN(unsigned n, int32_t x) { + VIXL_ASSERT((0 < n) && (n < 32)); + // Convert to an unsigned integer to avoid implementation-defined behavior. 
+ return !(static_cast(x) >> n); +} +inline bool IsUintN(unsigned n, uint64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + return !(x >> n); +} +inline bool IsUintN(unsigned n, int64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + // Convert to an unsigned integer to avoid implementation-defined behavior. + return !(static_cast(x) >> n); +} +VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) { + return IsUintN(n, x); +} + +inline uint64_t TruncateToUintN(unsigned n, uint64_t x) { + VIXL_ASSERT((0 < n) && (n < 64)); + return static_cast(x) & ((UINT64_C(1) << n) - 1); +} +VIXL_DEPRECATED("TruncateToUintN", + inline uint64_t truncate_to_intn(unsigned n, int64_t x)) { + return TruncateToUintN(n, x); +} + +// clang-format off +#define INT_1_TO_32_LIST(V) \ +V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \ +V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \ +V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \ +V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) + +#define INT_33_TO_63_LIST(V) \ +V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \ +V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \ +V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \ +V(57) V(58) V(59) V(60) V(61) V(62) V(63) + +#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V) + +// clang-format on + +#define DECLARE_IS_INT_N(N) \ + inline bool IsInt##N(int64_t x) { return IsIntN(N, x); } \ + VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) { \ + return IsIntN(N, x); \ + } + +#define DECLARE_IS_UINT_N(N) \ + inline bool IsUint##N(int64_t x) { return IsUintN(N, x); } \ + VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \ + return IsUintN(N, x); \ + } + +#define DECLARE_TRUNCATE_TO_UINT_32(N) \ + inline uint32_t TruncateToUint##N(uint64_t x) { \ + return static_cast(TruncateToUintN(N, x)); \ + } \ + VIXL_DEPRECATED("TruncateToUint" #N, \ + inline uint32_t truncate_to_int##N(int64_t x)) { \ + return TruncateToUint##N(x); \ + } + 
+INT_1_TO_63_LIST(DECLARE_IS_INT_N) +INT_1_TO_63_LIST(DECLARE_IS_UINT_N) +INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32) + +#undef DECLARE_IS_INT_N +#undef DECLARE_IS_UINT_N +#undef DECLARE_TRUNCATE_TO_INT_N + +// Bit field extraction. +inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) { + VIXL_ASSERT((static_cast(msb) < sizeof(x) * 8) && (lsb >= 0) && + (msb >= lsb)); + if ((msb == 63) && (lsb == 0)) return x; + return (x >> lsb) & ((static_cast(1) << (1 + msb - lsb)) - 1); +} + + +inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) { + VIXL_ASSERT((static_cast(msb) < sizeof(x) * 8) && (lsb >= 0) && + (msb >= lsb)); + return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x)); +} + + +inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) { + VIXL_ASSERT((static_cast(msb) < sizeof(x) * 8) && (lsb >= 0) && + (msb >= lsb)); + uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x); + // If the highest extracted bit is set, sign extend. + if ((temp >> (msb - lsb)) == 1) { + temp |= ~UINT64_C(0) << (msb - lsb); + } + int64_t result; + memcpy(&result, &temp, sizeof(result)); + return result; +} + + +inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) { + VIXL_ASSERT((static_cast(msb) < sizeof(x) * 8) && (lsb >= 0) && + (msb >= lsb)); + uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x)); + int32_t result; + memcpy(&result, &temp, sizeof(result)); + return result; +} + + +inline uint64_t RotateRight(uint64_t value, + unsigned int rotate, + unsigned int width) { + VIXL_ASSERT((width > 0) && (width <= 64)); + uint64_t width_mask = ~UINT64_C(0) >> (64 - width); + rotate &= 63; + if (rotate > 0) { + value &= width_mask; + value = (value << (width - rotate)) | (value >> rotate); + } + return value & width_mask; +} + + +// Wrapper class for passing FP16 values through the assembler. +// This is purely to aid with type checking/casting. 
+class Float16 { + public: + explicit Float16(double dvalue); + Float16() : rawbits_(0x0) {} + friend uint16_t Float16ToRawbits(Float16 value); + friend Float16 RawbitsToFloat16(uint16_t bits); + + protected: + uint16_t rawbits_; +}; + +// Floating point representation. +uint16_t Float16ToRawbits(Float16 value); + + +uint32_t FloatToRawbits(float value); +VIXL_DEPRECATED("FloatToRawbits", + inline uint32_t float_to_rawbits(float value)) { + return FloatToRawbits(value); +} + +uint64_t DoubleToRawbits(double value); +VIXL_DEPRECATED("DoubleToRawbits", + inline uint64_t double_to_rawbits(double value)) { + return DoubleToRawbits(value); +} + +Float16 RawbitsToFloat16(uint16_t bits); + +float RawbitsToFloat(uint32_t bits); +VIXL_DEPRECATED("RawbitsToFloat", + inline float rawbits_to_float(uint32_t bits)) { + return RawbitsToFloat(bits); +} + +double RawbitsToDouble(uint64_t bits); +VIXL_DEPRECATED("RawbitsToDouble", + inline double rawbits_to_double(uint64_t bits)) { + return RawbitsToDouble(bits); +} + +namespace internal { + +// Internal simulation class used solely by the simulator to +// provide an abstraction layer for any half-precision arithmetic. +class SimFloat16 : public Float16 { + public: + // TODO: We should investigate making this constructor explicit. + // This is currently difficult to do due to a number of templated + // functions in the simulator which rely on returning double values. 
+ SimFloat16(double dvalue) : Float16(dvalue) {} // NOLINT(runtime/explicit) + SimFloat16(Float16 f) { // NOLINT(runtime/explicit) + this->rawbits_ = Float16ToRawbits(f); + } + SimFloat16() : Float16() {} + SimFloat16 operator-() const; + SimFloat16 operator+(SimFloat16 rhs) const; + SimFloat16 operator-(SimFloat16 rhs) const; + SimFloat16 operator*(SimFloat16 rhs) const; + SimFloat16 operator/(SimFloat16 rhs) const; + bool operator<(SimFloat16 rhs) const; + bool operator>(SimFloat16 rhs) const; + bool operator==(SimFloat16 rhs) const; + bool operator!=(SimFloat16 rhs) const; + // This is necessary for conversions peformed in (macro asm) Fmov. + bool operator==(double rhs) const; + operator double() const; +}; +} // namespace internal + +uint32_t Float16Sign(internal::SimFloat16 value); + +uint32_t Float16Exp(internal::SimFloat16 value); + +uint32_t Float16Mantissa(internal::SimFloat16 value); + +uint32_t FloatSign(float value); +VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) { + return FloatSign(value); +} + +uint32_t FloatExp(float value); +VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) { + return FloatExp(value); +} + +uint32_t FloatMantissa(float value); +VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) { + return FloatMantissa(value); +} + +uint32_t DoubleSign(double value); +VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) { + return DoubleSign(value); +} + +uint32_t DoubleExp(double value); +VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) { + return DoubleExp(value); +} + +uint64_t DoubleMantissa(double value); +VIXL_DEPRECATED("DoubleMantissa", + inline uint64_t double_mantissa(double value)) { + return DoubleMantissa(value); +} + +internal::SimFloat16 Float16Pack(uint16_t sign, + uint16_t exp, + uint16_t mantissa); + +float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa); +VIXL_DEPRECATED("FloatPack", + inline float 
float_pack(uint32_t sign, + uint32_t exp, + uint32_t mantissa)) { + return FloatPack(sign, exp, mantissa); +} + +double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa); +VIXL_DEPRECATED("DoublePack", + inline double double_pack(uint32_t sign, + uint32_t exp, + uint64_t mantissa)) { + return DoublePack(sign, exp, mantissa); +} + +// An fpclassify() function for 16-bit half-precision floats. +int Float16Classify(Float16 value); +VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) { + return Float16Classify(RawbitsToFloat16(value)); +} + +bool IsZero(Float16 value); + +inline bool IsNaN(float value) { return std::isnan(value); } + +inline bool IsNaN(double value) { return std::isnan(value); } + +inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; } + +inline bool IsInf(float value) { return std::isinf(value); } + +inline bool IsInf(double value) { return std::isinf(value); } + +inline bool IsInf(Float16 value) { + return Float16Classify(value) == FP_INFINITE; +} + + +// NaN tests. +inline bool IsSignallingNaN(double num) { + const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); + uint64_t raw = DoubleToRawbits(num); + if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) { + return true; + } + return false; +} + + +inline bool IsSignallingNaN(float num) { + const uint32_t kFP32QuietNaNMask = 0x00400000; + uint32_t raw = FloatToRawbits(num); + if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) { + return true; + } + return false; +} + + +inline bool IsSignallingNaN(Float16 num) { + const uint16_t kFP16QuietNaNMask = 0x0200; + return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0); +} + + +template +inline bool IsQuietNaN(T num) { + return IsNaN(num) && !IsSignallingNaN(num); +} + + +// Convert the NaN in 'num' to a quiet NaN. 
+inline double ToQuietNaN(double num) { + const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); + VIXL_ASSERT(IsNaN(num)); + return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask); +} + + +inline float ToQuietNaN(float num) { + const uint32_t kFP32QuietNaNMask = 0x00400000; + VIXL_ASSERT(IsNaN(num)); + return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask); +} + + +inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) { + const uint16_t kFP16QuietNaNMask = 0x0200; + VIXL_ASSERT(IsNaN(num)); + return internal::SimFloat16( + RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask)); +} + + +// Fused multiply-add. +inline double FusedMultiplyAdd(double op1, double op2, double a) { + return fma(op1, op2, a); +} + + +inline float FusedMultiplyAdd(float op1, float op2, float a) { + return fmaf(op1, op2, a); +} + + +inline uint64_t LowestSetBit(uint64_t value) { return value & -value; } + + +template +inline int HighestSetBitPosition(T value) { + VIXL_ASSERT(value != 0); + return (sizeof(value) * 8 - 1) - CountLeadingZeros(value); +} + + +template +inline int WhichPowerOf2(V value) { + VIXL_ASSERT(IsPowerOf2(value)); + return CountTrailingZeros(value); +} + + +unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size); + + +int BitCount(uint64_t value); + + +template +T ReverseBits(T value) { + VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8)); + T result = 0; + for (unsigned i = 0; i < (sizeof(value) * 8); i++) { + result = (result << 1) | (value & 1); + value >>= 1; + } + return result; +} + + +template +inline T SignExtend(T val, int bitSize) { + VIXL_ASSERT(bitSize > 0); + T mask = (T(2) << (bitSize - 1)) - T(1); + val &= mask; + T sign_bits = -((val >> (bitSize - 1)) << bitSize); + val |= sign_bits; + return val; +} + + +template +T ReverseBytes(T value, int block_bytes_log2) { + VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8)); + VIXL_ASSERT((1U << 
block_bytes_log2) <= sizeof(value)); + // Split the 64-bit value into an 8-bit array, where b[0] is the least + // significant byte, and b[7] is the most significant. + uint8_t bytes[8]; + uint64_t mask = UINT64_C(0xff00000000000000); + for (int i = 7; i >= 0; i--) { + bytes[i] = (static_cast(value) & mask) >> (i * 8); + mask >>= 8; + } + + // Permutation tables for REV instructions. + // permute_table[0] is used by REV16_x, REV16_w + // permute_table[1] is used by REV32_x, REV_w + // permute_table[2] is used by REV_x + VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4)); + static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1}, + {4, 5, 6, 7, 0, 1, 2, 3}, + {0, 1, 2, 3, 4, 5, 6, 7}}; + uint64_t temp = 0; + for (int i = 0; i < 8; i++) { + temp <<= 8; + temp |= bytes[permute_table[block_bytes_log2 - 1][i]]; + } + + T result; + VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp)); + memcpy(&result, &temp, sizeof(result)); + return result; +} + +template +inline bool IsMultiple(T value) { + VIXL_ASSERT(IsPowerOf2(MULTIPLE)); + return (value & (MULTIPLE - 1)) == 0; +} + +template +inline bool IsMultiple(T value, unsigned multiple) { + VIXL_ASSERT(IsPowerOf2(multiple)); + return (value & (multiple - 1)) == 0; +} + +template +inline bool IsAligned(T pointer, int alignment) { + VIXL_ASSERT(IsPowerOf2(alignment)); + return (pointer & (alignment - 1)) == 0; +} + +// Pointer alignment +// TODO: rename/refactor to make it specific to instructions. +template +inline bool IsAligned(T pointer) { + VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof) + // Use C-style casts to get static_cast behaviour for integral types (T), and + // reinterpret_cast behaviour for other types. + return IsAligned((intptr_t)(pointer), ALIGN); +} + +template +bool IsWordAligned(T pointer) { + return IsAligned<4>(pointer); +} + +// Increment a pointer until it has the specified alignment. The alignment must +// be a power of two. 
+template +T AlignUp(T pointer, + typename Unsigned::type alignment) { + VIXL_ASSERT(IsPowerOf2(alignment)); + // Use C-style casts to get static_cast behaviour for integral types (T), and + // reinterpret_cast behaviour for other types. + + typename Unsigned::type pointer_raw = + (typename Unsigned::type)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); + + size_t mask = alignment - 1; + T result = (T)((pointer_raw + mask) & ~mask); + VIXL_ASSERT(result >= pointer); + + return result; +} + +// Decrement a pointer until it has the specified alignment. The alignment must +// be a power of two. +template +T AlignDown(T pointer, + typename Unsigned::type alignment) { + VIXL_ASSERT(IsPowerOf2(alignment)); + // Use C-style casts to get static_cast behaviour for integral types (T), and + // reinterpret_cast behaviour for other types. + + typename Unsigned::type pointer_raw = + (typename Unsigned::type)pointer; + VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); + + size_t mask = alignment - 1; + return (T)(pointer_raw & ~mask); +} + + +template +inline T ExtractBit(T value, unsigned bit) { + return (value >> bit) & T(1); +} + +template +inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) { + return Td((value >> least_significant_bit) & Ts(mask)); +} + +template +inline void AssignBit(Td& dst, // NOLINT(runtime/references) + int bit, + Ts value) { + VIXL_ASSERT((value == Ts(0)) || (value == Ts(1))); + VIXL_ASSERT(bit >= 0); + VIXL_ASSERT(bit < static_cast(sizeof(Td) * 8)); + Td mask(1); + dst &= ~(mask << bit); + dst |= Td(value) << bit; +} + +template +inline void AssignBits(Td& dst, // NOLINT(runtime/references) + int least_significant_bit, + Ts mask, + Ts value) { + VIXL_ASSERT(least_significant_bit >= 0); + VIXL_ASSERT(least_significant_bit < static_cast(sizeof(Td) * 8)); + VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) == + Td(mask)); + VIXL_ASSERT((value & mask) == value); + dst &= ~(Td(mask) 
<< least_significant_bit); + dst |= Td(value) << least_significant_bit; +} + +class VFP { + public: + static uint32_t FP32ToImm8(float imm) { + // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 + uint32_t bits = FloatToRawbits(imm); + // bit7: a000.0000 + uint32_t bit7 = ((bits >> 31) & 0x1) << 7; + // bit6: 0b00.0000 + uint32_t bit6 = ((bits >> 29) & 0x1) << 6; + // bit5_to_0: 00cd.efgh + uint32_t bit5_to_0 = (bits >> 19) & 0x3f; + return static_cast(bit7 | bit6 | bit5_to_0); + } + static uint32_t FP64ToImm8(double imm) { + // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 + uint64_t bits = DoubleToRawbits(imm); + // bit7: a000.0000 + uint64_t bit7 = ((bits >> 63) & 0x1) << 7; + // bit6: 0b00.0000 + uint64_t bit6 = ((bits >> 61) & 0x1) << 6; + // bit5_to_0: 00cd.efgh + uint64_t bit5_to_0 = (bits >> 48) & 0x3f; + + return static_cast(bit7 | bit6 | bit5_to_0); + } + static float Imm8ToFP32(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint32_t bit7 = (bits >> 7) & 0x1; + uint32_t bit6 = (bits >> 6) & 0x1; + uint32_t bit5_to_0 = bits & 0x3f; + uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19); + + return RawbitsToFloat(result); + } + static double Imm8ToFP64(uint32_t imm8) { + // Imm8: abcdefgh (8 bits) + // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits) + // where B is b ^ 1 + uint32_t bits = imm8; + uint64_t bit7 = (bits >> 7) & 0x1; + uint64_t bit6 = (bits >> 6) & 0x1; + uint64_t bit5_to_0 = bits & 0x3f; + uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48); + return RawbitsToDouble(result); + } + static bool IsImmFP32(float imm) { + // Valid values will have the form: + // aBbb.bbbc.defg.h000.0000.0000.0000.0000 + uint32_t bits = FloatToRawbits(imm); + // bits[19..0] are cleared. 
+ if ((bits & 0x7ffff) != 0) { + return false; + } + + + // bits[29..25] are all set or all cleared. + uint32_t b_pattern = (bits >> 16) & 0x3e00; + if (b_pattern != 0 && b_pattern != 0x3e00) { + return false; + } + // bit[30] and bit[29] are opposite. + if (((bits ^ (bits << 1)) & 0x40000000) == 0) { + return false; + } + return true; + } + static bool IsImmFP64(double imm) { + // Valid values will have the form: + // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 + // 0000.0000.0000.0000.0000.0000.0000.0000 + uint64_t bits = DoubleToRawbits(imm); + // bits[47..0] are cleared. + if ((bits & 0x0000ffffffffffff) != 0) { + return false; + } + // bits[61..54] are all set or all cleared. + uint32_t b_pattern = (bits >> 48) & 0x3fc0; + if ((b_pattern != 0) && (b_pattern != 0x3fc0)) { + return false; + } + // bit[62] and bit[61] are opposite. + if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) { + return false; + } + return true; + } +}; + +class BitField { + // ForEachBitHelper is a functor that will call + // bool ForEachBitHelper::execute(ElementType id) const + // and expects a boolean in return whether to continue (if true) + // or stop (if false) + // check_set will check if the bits are on (true) or off(false) + template + bool ForEachBit(const ForEachBitHelper& helper) { + for (int i = 0; static_cast(i) < bitfield_.size(); i++) { + if (bitfield_[i] == check_set) + if (!helper.execute(i)) return false; + } + return true; + } + + public: + explicit BitField(unsigned size) : bitfield_(size, 0) {} + + void Set(int i) { + VIXL_ASSERT((i >= 0) && (static_cast(i) < bitfield_.size())); + bitfield_[i] = true; + } + + void Unset(int i) { + VIXL_ASSERT((i >= 0) && (static_cast(i) < bitfield_.size())); + bitfield_[i] = true; + } + + bool IsSet(int i) const { return bitfield_[i]; } + + // For each bit not set in the bitfield call the execute functor + // execute. 
+ // ForEachBitSetHelper::execute returns true if the iteration through + // the bits can continue, otherwise it will stop. + // struct ForEachBitSetHelper { + // bool execute(int /*id*/) { return false; } + // }; + template + bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) { + return ForEachBit(helper); + } + + // For each bit set in the bitfield call the execute functor + // execute. + template + bool ForEachBitSet(const ForEachBitSetHelper& helper) { + return ForEachBit(helper); + } + + private: + std::vector bitfield_; +}; + +namespace internal { + +typedef int64_t Int64; +class Uint64; +class Uint128; + +class Uint32 { + uint32_t data_; + + public: + // Unlike uint32_t, Uint32 has a default constructor. + Uint32() { data_ = 0; } + explicit Uint32(uint32_t data) : data_(data) {} + inline explicit Uint32(Uint64 data); + uint32_t Get() const { return data_; } + template + int32_t GetSigned() const { + return ExtractSignedBitfield32(N - 1, 0, data_); + } + int32_t GetSigned() const { return data_; } + Uint32 operator~() const { return Uint32(~data_); } + Uint32 operator-() const { return Uint32(-data_); } + bool operator==(Uint32 value) const { return data_ == value.data_; } + bool operator!=(Uint32 value) const { return data_ != value.data_; } + bool operator>(Uint32 value) const { return data_ > value.data_; } + Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); } + Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); } + Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); } + Uint32 operator&=(Uint32 value) { + data_ &= value.data_; + return *this; + } + Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); } + Uint32 operator^=(Uint32 value) { + data_ ^= value.data_; + return *this; + } + Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); } + Uint32 operator|=(Uint32 value) { + data_ |= value.data_; + return *this; + } + // Unlike 
uint32_t, the shift functions can accept negative shift and + // return 0 when the shift is too big. + Uint32 operator>>(int shift) const { + if (shift == 0) return *this; + if (shift < 0) { + int tmp = -shift; + if (tmp >= 32) return Uint32(0); + return Uint32(data_ << tmp); + } + int tmp = shift; + if (tmp >= 32) return Uint32(0); + return Uint32(data_ >> tmp); + } + Uint32 operator<<(int shift) const { + if (shift == 0) return *this; + if (shift < 0) { + int tmp = -shift; + if (tmp >= 32) return Uint32(0); + return Uint32(data_ >> tmp); + } + int tmp = shift; + if (tmp >= 32) return Uint32(0); + return Uint32(data_ << tmp); + } +}; + +class Uint64 { + uint64_t data_; + + public: + // Unlike uint64_t, Uint64 has a default constructor. + Uint64() { data_ = 0; } + explicit Uint64(uint64_t data) : data_(data) {} + explicit Uint64(Uint32 data) : data_(data.Get()) {} + inline explicit Uint64(Uint128 data); + uint64_t Get() const { return data_; } + int64_t GetSigned(int N) const { + return ExtractSignedBitfield64(N - 1, 0, data_); + } + int64_t GetSigned() const { return data_; } + Uint32 ToUint32() const { + VIXL_ASSERT((data_ >> 32) == 0); + return Uint32(static_cast(data_)); + } + Uint32 GetHigh32() const { return Uint32(data_ >> 32); } + Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); } + Uint64 operator~() const { return Uint64(~data_); } + Uint64 operator-() const { return Uint64(-data_); } + bool operator==(Uint64 value) const { return data_ == value.data_; } + bool operator!=(Uint64 value) const { return data_ != value.data_; } + Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); } + Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); } + Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); } + Uint64 operator&=(Uint64 value) { + data_ &= value.data_; + return *this; + } + Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); } + Uint64 operator^=(Uint64 
value) { + data_ ^= value.data_; + return *this; + } + Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); } + Uint64 operator|=(Uint64 value) { + data_ |= value.data_; + return *this; + } + // Unlike uint64_t, the shift functions can accept negative shift and + // return 0 when the shift is too big. + Uint64 operator>>(int shift) const { + if (shift == 0) return *this; + if (shift < 0) { + int tmp = -shift; + if (tmp >= 64) return Uint64(0); + return Uint64(data_ << tmp); + } + int tmp = shift; + if (tmp >= 64) return Uint64(0); + return Uint64(data_ >> tmp); + } + Uint64 operator<<(int shift) const { + if (shift == 0) return *this; + if (shift < 0) { + int tmp = -shift; + if (tmp >= 64) return Uint64(0); + return Uint64(data_ >> tmp); + } + int tmp = shift; + if (tmp >= 64) return Uint64(0); + return Uint64(data_ << tmp); + } +}; + +class Uint128 { + uint64_t data_high_; + uint64_t data_low_; + + public: + Uint128() : data_high_(0), data_low_(0) {} + explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {} + explicit Uint128(Uint64 data_low) + : data_high_(0), data_low_(data_low.Get()) {} + Uint128(uint64_t data_high, uint64_t data_low) + : data_high_(data_high), data_low_(data_low) {} + Uint64 ToUint64() const { + VIXL_ASSERT(data_high_ == 0); + return Uint64(data_low_); + } + Uint64 GetHigh64() const { return Uint64(data_high_); } + Uint64 GetLow64() const { return Uint64(data_low_); } + Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); } + bool operator==(Uint128 value) const { + return (data_high_ == value.data_high_) && (data_low_ == value.data_low_); + } + Uint128 operator&(Uint128 value) const { + return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_); + } + Uint128 operator&=(Uint128 value) { + data_high_ &= value.data_high_; + data_low_ &= value.data_low_; + return *this; + } + Uint128 operator|=(Uint128 value) { + data_high_ |= value.data_high_; + data_low_ |= 
value.data_low_; + return *this; + } + Uint128 operator>>(int shift) const { + VIXL_ASSERT((shift >= 0) && (shift < 128)); + if (shift == 0) return *this; + if (shift >= 64) { + return Uint128(0, data_high_ >> (shift - 64)); + } + uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift); + return Uint128(data_high_ >> shift, tmp); + } + Uint128 operator<<(int shift) const { + VIXL_ASSERT((shift >= 0) && (shift < 128)); + if (shift == 0) return *this; + if (shift >= 64) { + return Uint128(data_low_ << (shift - 64), 0); + } + uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift)); + return Uint128(tmp, data_low_ << shift); + } +}; + +Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {} +Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {} + +Int64 BitCount(Uint32 value); + +} // namespace internal + +// The default NaN values (for FPCR.DN=1). +extern const double kFP64DefaultNaN; +extern const float kFP32DefaultNaN; +extern const Float16 kFP16DefaultNaN; + +// Floating-point infinity values. +extern const Float16 kFP16PositiveInfinity; +extern const Float16 kFP16NegativeInfinity; +extern const float kFP32PositiveInfinity; +extern const float kFP32NegativeInfinity; +extern const double kFP64PositiveInfinity; +extern const double kFP64NegativeInfinity; + +// Floating-point zero values. +extern const Float16 kFP16PositiveZero; +extern const Float16 kFP16NegativeZero; + +// AArch64 floating-point specifics. These match IEEE-754. +const unsigned kDoubleMantissaBits = 52; +const unsigned kDoubleExponentBits = 11; +const unsigned kFloatMantissaBits = 23; +const unsigned kFloatExponentBits = 8; +const unsigned kFloat16MantissaBits = 10; +const unsigned kFloat16ExponentBits = 5; + +enum FPRounding { + // The first four values are encodable directly by FPCR. 
+ FPTieEven = 0x0, + FPPositiveInfinity = 0x1, + FPNegativeInfinity = 0x2, + FPZero = 0x3, + + // The final rounding modes are only available when explicitly specified by + // the instruction (such as with fcvta). It cannot be set in FPCR. + FPTieAway, + FPRoundOdd +}; + +enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN }; + +// Assemble the specified IEEE-754 components into the target type and apply +// appropriate rounding. +// sign: 0 = positive, 1 = negative +// exponent: Unbiased IEEE-754 exponent. +// mantissa: The mantissa of the input. The top bit (which is not encoded for +// normal IEEE-754 values) must not be omitted. This bit has the +// value 'pow(2, exponent)'. +// +// The input value is assumed to be a normalized value. That is, the input may +// not be infinity or NaN. If the source value is subnormal, it must be +// normalized before calling this function such that the highest set bit in the +// mantissa has the value 'pow(2, exponent)'. +// +// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than +// calling a templated FPRound. +template +T FPRound(int64_t sign, + int64_t exponent, + uint64_t mantissa, + FPRounding round_mode) { + VIXL_ASSERT((sign == 0) || (sign == 1)); + + // Only FPTieEven and FPRoundOdd rounding modes are implemented. + VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd)); + + // Rounding can promote subnormals to normals, and normals to infinities. For + // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be + // encodable as a float, but rounding based on the low-order mantissa bits + // could make it overflow. With ties-to-even rounding, this value would become + // an infinity. + + // ---- Rounding Method ---- + // + // The exponent is irrelevant in the rounding operation, so we treat the + // lowest-order bit that will fit into the result ('onebit') as having + // the value '1'. 
Similarly, the highest-order bit that won't fit into + // the result ('halfbit') has the value '0.5'. The 'point' sits between + // 'onebit' and 'halfbit': + // + // These bits fit into the result. + // |---------------------| + // mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + // || + // / | + // / halfbit + // onebit + // + // For subnormal outputs, the range of representable bits is smaller and + // the position of onebit and halfbit depends on the exponent of the + // input, but the method is otherwise similar. + // + // onebit(frac) + // | + // | halfbit(frac) halfbit(adjusted) + // | / / + // | | | + // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00 + // 0b00.0... -> 0b00.0... -> 0b00 + // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00 + // 0b00.1... -> 0b00.1... -> 0b01 + // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01 + // 0b01.0... -> 0b01.0... -> 0b01 + // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10 + // 0b01.1... -> 0b01.1... -> 0b10 + // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10 + // 0b10.0... -> 0b10.0... -> 0b10 + // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10 + // 0b10.1... -> 0b10.1... -> 0b11 + // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11 + // ... / | / | + // / | / | + // / | + // adjusted = frac - (halfbit(mantissa) & ~onebit(frac)); / | + // + // mantissa = (mantissa >> shift) + halfbit(adjusted); + + static const int mantissa_offset = 0; + static const int exponent_offset = mantissa_offset + mbits; + static const int sign_offset = exponent_offset + ebits; + VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1)); + + // Bail out early for zero inputs. + if (mantissa == 0) { + return static_cast(sign << sign_offset); + } + + // If all bits in the exponent are set, the value is infinite or NaN. + // This is true for all binary IEEE-754 formats. + static const int infinite_exponent = (1 << ebits) - 1; + static const int max_normal_exponent = infinite_exponent - 1; + + // Apply the exponent bias to encode it for the result. 
Doing this early makes + // it easy to detect values that will be infinite or subnormal. + exponent += max_normal_exponent >> 1; + + if (exponent > max_normal_exponent) { + // Overflow: the input is too large for the result type to represent. + if (round_mode == FPTieEven) { + // FPTieEven rounding mode handles overflows using infinities. + exponent = infinite_exponent; + mantissa = 0; + } else { + VIXL_ASSERT(round_mode == FPRoundOdd); + // FPRoundOdd rounding mode handles overflows using the largest magnitude + // normal number. + exponent = max_normal_exponent; + mantissa = (UINT64_C(1) << exponent_offset) - 1; + } + return static_cast((sign << sign_offset) | + (exponent << exponent_offset) | + (mantissa << mantissa_offset)); + } + + // Calculate the shift required to move the top mantissa bit to the proper + // place in the destination type. + const int highest_significant_bit = 63 - CountLeadingZeros(mantissa); + int shift = highest_significant_bit - mbits; + + if (exponent <= 0) { + // The output will be subnormal (before rounding). + // For subnormal outputs, the shift must be adjusted by the exponent. The +1 + // is necessary because the exponent of a subnormal value (encoded as 0) is + // the same as the exponent of the smallest normal value (encoded as 1). + shift += -exponent + 1; + + // Handle inputs that would produce a zero output. + // + // Shifts higher than highest_significant_bit+1 will always produce a zero + // result. A shift of exactly highest_significant_bit+1 might produce a + // non-zero result after rounding. + if (shift > (highest_significant_bit + 1)) { + if (round_mode == FPTieEven) { + // The result will always be +/-0.0. + return static_cast(sign << sign_offset); + } else { + VIXL_ASSERT(round_mode == FPRoundOdd); + VIXL_ASSERT(mantissa != 0); + // For FPRoundOdd, if the mantissa is too small to represent and + // non-zero return the next "odd" value. 
+ return static_cast((sign << sign_offset) | 1); + } + } + + // Properly encode the exponent for a subnormal output. + exponent = 0; + } else { + // Clear the topmost mantissa bit, since this is not encoded in IEEE-754 + // normal values. + mantissa &= ~(UINT64_C(1) << highest_significant_bit); + } + + // The casts below are only well-defined for unsigned integers. + VIXL_STATIC_ASSERT(std::numeric_limits::is_integer); + VIXL_STATIC_ASSERT(!std::numeric_limits::is_signed); + + if (shift > 0) { + if (round_mode == FPTieEven) { + // We have to shift the mantissa to the right. Some precision is lost, so + // we need to apply rounding. + uint64_t onebit_mantissa = (mantissa >> (shift)) & 1; + uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1; + uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa); + uint64_t adjusted = mantissa - adjustment; + T halfbit_adjusted = (adjusted >> (shift - 1)) & 1; + + T result = + static_cast((sign << sign_offset) | (exponent << exponent_offset) | + ((mantissa >> shift) << mantissa_offset)); + + // A very large mantissa can overflow during rounding. If this happens, + // the exponent should be incremented and the mantissa set to 1.0 + // (encoded as 0). Applying halfbit_adjusted after assembling the float + // has the nice side-effect that this case is handled for free. + // + // This also handles cases where a very large finite value overflows to + // infinity, or where a very large subnormal value overflows to become + // normal. + return result + halfbit_adjusted; + } else { + VIXL_ASSERT(round_mode == FPRoundOdd); + // If any bits at position halfbit or below are set, onebit (ie. the + // bottom bit of the resulting mantissa) must be set. 
+ uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1); + if (fractional_bits != 0) { + mantissa |= UINT64_C(1) << shift; + } + + return static_cast((sign << sign_offset) | + (exponent << exponent_offset) | + ((mantissa >> shift) << mantissa_offset)); + } + } else { + // We have to shift the mantissa to the left (or not at all). The input + // mantissa is exactly representable in the output mantissa, so apply no + // rounding correction. + return static_cast((sign << sign_offset) | + (exponent << exponent_offset) | + ((mantissa << -shift) << mantissa_offset)); + } +} + + +// See FPRound for a description of this function. +inline double FPRoundToDouble(int64_t sign, + int64_t exponent, + uint64_t mantissa, + FPRounding round_mode) { + uint64_t bits = + FPRound(sign, + exponent, + mantissa, + round_mode); + return RawbitsToDouble(bits); +} + + +// See FPRound for a description of this function. +inline Float16 FPRoundToFloat16(int64_t sign, + int64_t exponent, + uint64_t mantissa, + FPRounding round_mode) { + return RawbitsToFloat16( + FPRound(sign, exponent, mantissa, round_mode)); +} + + +// See FPRound for a description of this function. 
+static inline float FPRoundToFloat(int64_t sign, + int64_t exponent, + uint64_t mantissa, + FPRounding round_mode) { + uint32_t bits = + FPRound(sign, + exponent, + mantissa, + round_mode); + return RawbitsToFloat(bits); +} + + +float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL); +float FPToFloat(double value, + FPRounding round_mode, + UseDefaultNaN DN, + bool* exception = NULL); + +double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL); +double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL); + +Float16 FPToFloat16(float value, + FPRounding round_mode, + UseDefaultNaN DN, + bool* exception = NULL); + +Float16 FPToFloat16(double value, + FPRounding round_mode, + UseDefaultNaN DN, + bool* exception = NULL); +} // namespace vixl + +#endif // VIXL_UTILS_H diff --git a/module/src/main/cpp/whale/src/assembler/x86/assembler_x86.cc b/module/src/main/cpp/whale/src/assembler/x86/assembler_x86.cc new file mode 100644 index 00000000..6313fb5d --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86/assembler_x86.cc @@ -0,0 +1,3092 @@ +#include "assembler/x86/assembler_x86.h" + +namespace whale { +namespace x86 { + + +uint8_t X86Assembler::EmitVexByteZero(bool is_two_byte) { + uint8_t vex_zero = 0xC0; + if (!is_two_byte) { + vex_zero |= 0xC4; + } else { + vex_zero |= 0xC5; + } + return vex_zero; +} + +uint8_t X86Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm ) { + // VEX Byte 1 + uint8_t vex_prefix = 0; + if (!r) { + vex_prefix |= 0x80; // VEX.R + } + if (!x) { + vex_prefix |= 0x40; // VEX.X + } + if (!b) { + vex_prefix |= 0x20; // VEX.B + } + + // VEX.mmmmm + switch (mmmmm) { + case 1: + // implied 0F leading opcode byte + vex_prefix |= 0x01; + break; + case 2: + // implied leading 0F 38 opcode byte + vex_prefix |= 0x02; + break; + case 3: + // implied leading OF 3A opcode byte + vex_prefix |= 0x03; + break; + default: + LOG(FATAL) << "unknown opcode bytes"; + } + return vex_prefix; +} + +uint8_t 
X86Assembler::EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp) { + uint8_t vex_prefix = 0; + // VEX Byte 2 + if (w) { + vex_prefix |= 0x80; + } + // VEX.vvvv + if (operand.IsXmmRegister()) { + XmmRegister vvvv = operand.AsXmmRegister(); + int inverted_reg = 15-static_cast(vvvv); + uint8_t reg = static_cast(inverted_reg); + vex_prefix |= ((reg & 0x0F) << 3); + } else if (operand.IsCpuRegister()) { + Register vvvv = operand.AsCpuRegister(); + int inverted_reg = 15 - static_cast(vvvv); + uint8_t reg = static_cast(inverted_reg); + vex_prefix |= ((reg & 0x0F) << 3); + } + + // VEX.L + if (l == 256) { + vex_prefix |= 0x04; + } + + // VEX.pp + switch (pp) { + case 0: + // SIMD Pefix - None + vex_prefix |= 0x00; + break; + case 1: + // SIMD Prefix - 66 + vex_prefix |= 0x01; + break; + case 2: + // SIMD Prefix - F3 + vex_prefix |= 0x02; + break; + case 3: + // SIMD Prefix - F2 + vex_prefix |= 0x03; + break; + default: + LOG(FATAL) << "unknown SIMD Prefix"; + } + + return vex_prefix; +} + +void X86Assembler::call(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitRegisterOperand(2, reg); +} + + +void X86Assembler::call(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitOperand(2, address); +} + + +void X86Assembler::call(Label* label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xE8); + static const int kSize = 5; + // Offset by one because we already have emitted the opcode. 
+ EmitLabel(label, kSize - 1); +} + + +void X86Assembler::call(const ExternalLabel& label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + intptr_t call_start = buffer_.GetPosition(); + EmitUint8(0xE8); + EmitInt32(label.address()); + static const intptr_t kCallExternalLabelSize = 5; + DCHECK_EQ((buffer_.GetPosition() - call_start), kCallExternalLabelSize); +} + + +void X86Assembler::pushl(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x50 + reg); +} + + +void X86Assembler::pushl(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitOperand(6, address); +} + + +void X86Assembler::pushl(const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (imm.is_int8()) { + EmitUint8(0x6A); + EmitUint8(imm.value() & 0xFF); + } else { + EmitUint8(0x68); + EmitImmediate(imm); + } +} + + +void X86Assembler::popl(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x58 + reg); +} + + +void X86Assembler::popl(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x8F); + EmitOperand(0, address); +} + + +void X86Assembler::movl(Register dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xB8 + dst); + EmitImmediate(imm); +} + + +void X86Assembler::movl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x89); + EmitRegisterOperand(src, dst); +} + + +void X86Assembler::movl(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x8B); + EmitOperand(dst, src); +} + + +void X86Assembler::movl(const Address& dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x89); + EmitOperand(src, dst); +} + + +void X86Assembler::movl(const Address& dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC7); + 
EmitOperand(0, dst); + EmitImmediate(imm); +} + +void X86Assembler::movl(const Address& dst, Label* lbl) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC7); + EmitOperand(0, dst); + EmitLabel(lbl, dst.length_ + 5); +} + +void X86Assembler::movntl(const Address& dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xC3); + EmitOperand(src, dst); +} + +void X86Assembler::blsi(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(/*r=*/ false, + /*x=*/ false, + /*b=*/ false, + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ false, + /*l=*/ 128, + X86ManagedRegister::FromCpuRegister(dst), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + EmitUint8(0xF3); + EmitRegisterOperand(3, src); +} + +void X86Assembler::blsmsk(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(/*r=*/ false, + /*x=*/ false, + /*b=*/ false, + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ false, + /*l=*/ 128, + X86ManagedRegister::FromCpuRegister(dst), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + EmitUint8(0xF3); + EmitRegisterOperand(2, src); +} + +void X86Assembler::blsr(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(/*r=*/ false, + /*x=*/ false, + /*b=*/ false, + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ false, + /*l=*/ 128, + X86ManagedRegister::FromCpuRegister(dst), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + EmitUint8(0xF3); + EmitRegisterOperand(1, src); +} + +void 
X86Assembler::bswapl(Register dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xC8 + dst); +} + +void X86Assembler::bsfl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBC); + EmitRegisterOperand(dst, src); +} + +void X86Assembler::bsfl(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBC); + EmitOperand(dst, src); +} + +void X86Assembler::bsrl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitRegisterOperand(dst, src); +} + +void X86Assembler::bsrl(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitOperand(dst, src); +} + +void X86Assembler::popcntl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0xB8); + EmitRegisterOperand(dst, src); +} + +void X86Assembler::popcntl(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0xB8); + EmitOperand(dst, src); +} + +void X86Assembler::movzxb(Register dst, ByteRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB6); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movzxb(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB6); + EmitOperand(dst, src); +} + + +void X86Assembler::movsxb(Register dst, ByteRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBE); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movsxb(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + 
EmitUint8(0xBE); + EmitOperand(dst, src); +} + + +void X86Assembler::movb(Register /*dst*/, const Address& /*src*/) { + LOG(FATAL) << "Use movzxb or movsxb instead."; +} + + +void X86Assembler::movb(const Address& dst, ByteRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x88); + EmitOperand(src, dst); +} + + +void X86Assembler::movb(const Address& dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC6); + EmitOperand(EAX, dst); + CHECK(imm.is_int8()); + EmitUint8(imm.value() & 0xFF); +} + + +void X86Assembler::movzxw(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB7); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movzxw(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB7); + EmitOperand(dst, src); +} + + +void X86Assembler::movsxw(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBF); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::movsxw(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBF); + EmitOperand(dst, src); +} + + +void X86Assembler::movw(Register /*dst*/, const Address& /*src*/) { + LOG(FATAL) << "Use movzxw or movsxw instead."; +} + + +void X86Assembler::movw(const Address& dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOperandSizeOverride(); + EmitUint8(0x89); + EmitOperand(src, dst); +} + + +void X86Assembler::movw(const Address& dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOperandSizeOverride(); + EmitUint8(0xC7); + EmitOperand(0, dst); + CHECK(imm.is_uint16() || imm.is_int16()); + EmitUint8(imm.value() & 0xFF); + EmitUint8(imm.value() >> 8); +} + + +void X86Assembler::leal(Register dst, const 
Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x8D); + EmitOperand(dst, src); +} + + +void X86Assembler::cmovl(Condition condition, Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x40 + condition); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::cmovl(Condition condition, Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x40 + condition); + EmitOperand(dst, src); +} + + +void X86Assembler::setb(Condition condition, Register dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x90 + condition); + EmitOperand(0, Operand(dst)); +} + + +void X86Assembler::movaps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::movaps(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitOperand(dst, src); +} + + +void X86Assembler::movups(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst, src); +} + + +void X86Assembler::movaps(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x29); + EmitOperand(src, dst); +} + + +void X86Assembler::movups(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src, dst); +} + + +void X86Assembler::movss(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst, src); +} + + +void X86Assembler::movss(const Address& dst, XmmRegister src) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src, dst); +} + + +void X86Assembler::movss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitXmmRegisterOperand(src, dst); +} + + +void X86Assembler::movd(XmmRegister dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x6E); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::movd(Register dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x7E); + EmitOperand(src, Operand(dst)); +} + + +void X86Assembler::addss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::addss(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitOperand(dst, src); +} + + +void X86Assembler::subss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::subss(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitOperand(dst, src); +} + + +void X86Assembler::mulss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::mulss(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + 
EmitUint8(0x59); + EmitOperand(dst, src); +} + + +void X86Assembler::divss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::divss(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitOperand(dst, src); +} + + +void X86Assembler::addps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::subps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::mulps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::divps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::movapd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::movapd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitOperand(dst, src); +} + + +void X86Assembler::movupd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst, src); +} + + +void X86Assembler::movapd(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity 
ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x29); + EmitOperand(src, dst); +} + + +void X86Assembler::movupd(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src, dst); +} + + +void X86Assembler::flds(const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(0, src); +} + + +void X86Assembler::fsts(const Address& dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(2, dst); +} + + +void X86Assembler::fstps(const Address& dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(3, dst); +} + + +void X86Assembler::movsd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst, src); +} + + +void X86Assembler::movsd(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src, dst); +} + + +void X86Assembler::movsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitXmmRegisterOperand(src, dst); +} + + +void X86Assembler::movhpd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x16); + EmitOperand(dst, src); +} + + +void X86Assembler::movhpd(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x17); + EmitOperand(src, dst); +} + + +void X86Assembler::addsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x58); + 
EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::addsd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitOperand(dst, src); +} + + +void X86Assembler::subsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::subsd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitOperand(dst, src); +} + + +void X86Assembler::mulsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::mulsd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitOperand(dst, src); +} + + +void X86Assembler::divsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::divsd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitOperand(dst, src); +} + + +void X86Assembler::addpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::subpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::mulpd(XmmRegister dst, XmmRegister 
src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::divpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::movdqa(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x6F); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::movdqa(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x6F); + EmitOperand(dst, src); +} + + +void X86Assembler::movdqu(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x6F); + EmitOperand(dst, src); +} + + +void X86Assembler::movdqa(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x7F); + EmitOperand(src, dst); +} + + +void X86Assembler::movdqu(const Address& dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x7F); + EmitOperand(src, dst); +} + + +void X86Assembler::paddb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xFC); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xF8); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::paddw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + 
EmitUint8(0x0F); + EmitUint8(0xFD); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xF9); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pmullw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xD5); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::paddd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xFE); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xFA); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pmulld(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x40); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::paddq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xD4); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xFB); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::paddusb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xDC); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::paddsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xEC); + 
EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::paddusw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xDD); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::paddsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xED); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubusb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xD8); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xE8); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubusw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xD9); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psubsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xE9); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x2A); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::cvtsi2sd(XmmRegister dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x2A); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::cvtss2si(Register dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x2D); + EmitXmmRegisterOperand(dst, src); +} + + +void 
X86Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x5A); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::cvtsd2si(Register dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x2D); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::cvttss2si(Register dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0x2C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::cvttsd2si(Register dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x2C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x5A); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::cvtdq2ps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x5B); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + EmitUint8(0xE6); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::comiss(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x2F); + EmitXmmRegisterOperand(a, b); +} + + +void X86Assembler::comiss(XmmRegister a, const Address& b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x2F); + EmitOperand(a, b); +} + + +void X86Assembler::comisd(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + 
EmitUint8(0x0F); + EmitUint8(0x2F); + EmitXmmRegisterOperand(a, b); +} + + +void X86Assembler::comisd(XmmRegister a, const Address& b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x2F); + EmitOperand(a, b); +} + + +void X86Assembler::ucomiss(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitXmmRegisterOperand(a, b); +} + + +void X86Assembler::ucomiss(XmmRegister a, const Address& b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitOperand(a, b); +} + + +void X86Assembler::ucomisd(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitXmmRegisterOperand(a, b); +} + + +void X86Assembler::ucomisd(XmmRegister a, const Address& b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitOperand(a, b); +} + + +void X86Assembler::roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x3A); + EmitUint8(0x0B); + EmitXmmRegisterOperand(dst, src); + EmitUint8(imm.value()); +} + + +void X86Assembler::roundss(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x3A); + EmitUint8(0x0A); + EmitXmmRegisterOperand(dst, src); + EmitUint8(imm.value()); +} + + +void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x51); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::sqrtss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0x0F); + 
EmitUint8(0x51); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::xorpd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitOperand(dst, src); +} + + +void X86Assembler::xorpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::xorps(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitOperand(dst, src); +} + + +void X86Assembler::xorps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pxor(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xEF); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::andpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x54); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::andpd(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x54); + EmitOperand(dst, src); +} + + +void X86Assembler::andps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x54); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::andps(XmmRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x54); + EmitOperand(dst, src); +} + + +void X86Assembler::pand(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); 
+ EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xDB); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::andn(Register dst, Register src1, Register src2) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(/*r=*/ false, + /*x=*/ false, + /*b=*/ false, + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ false, + /*l=*/ 128, + X86ManagedRegister::FromCpuRegister(src1), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + // Opcode field + EmitUint8(0xF2); + EmitRegisterOperand(dst, src2); +} + + +void X86Assembler::andnpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x55); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::andnps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x55); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pandn(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xDF); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::orpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x56); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::orps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x56); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::por(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xEB); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pavgb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity 
ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xE0); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pavgw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xE3); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psadbw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xF6); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pmaddwd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xF5); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::phaddw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x01); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::phaddd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x02); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::haddps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x7C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::haddpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x7C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::phsubw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x05); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::phsubd(XmmRegister dst, XmmRegister src) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x06); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::hsubps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0x0F); + EmitUint8(0x7D); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::hsubpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x7D); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pminsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x38); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pmaxsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3C); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pminsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xEA); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pmaxsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xEE); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pminsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x39); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pmaxsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3D); + EmitXmmRegisterOperand(dst, src); +} + +void 
X86Assembler::pminub(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xDA); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pmaxub(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xDE); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pminuw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3A); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pmaxuw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3E); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pminud(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3B); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pmaxud(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3F); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::minps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x5D); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::maxps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x5F); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::minpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x5D); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::maxpd(XmmRegister dst, 
XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x5F); + EmitXmmRegisterOperand(dst, src); +} + +void X86Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x74); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpeqw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x75); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpeqd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x76); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpeqq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x29); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpgtb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x64); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpgtw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x65); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpgtd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x66); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpgtq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x37); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::shufpd(XmmRegister dst, XmmRegister 
src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst, src); + EmitUint8(imm.value()); +} + + +void X86Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst, src); + EmitUint8(imm.value()); +} + + +void X86Assembler::pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x70); + EmitXmmRegisterOperand(dst, src); + EmitUint8(imm.value()); +} + + +void X86Assembler::punpcklbw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x60); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::punpcklwd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x61); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::punpckldq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x62); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::punpcklqdq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x6C); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::punpckhbw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x68); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::punpckhwd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x69); + 
EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::punpckhdq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x6A); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::punpckhqdq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x6D); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::psllw(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x71); + EmitXmmRegisterOperand(6, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::pslld(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x72); + EmitXmmRegisterOperand(6, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::psllq(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x73); + EmitXmmRegisterOperand(6, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::psraw(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x71); + EmitXmmRegisterOperand(4, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::psrad(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x72); + EmitXmmRegisterOperand(4, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::psrlw(XmmRegister reg, const 
Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x71); + EmitXmmRegisterOperand(2, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::psrld(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x72); + EmitXmmRegisterOperand(2, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::psrlq(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x73); + EmitXmmRegisterOperand(2, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::psrldq(XmmRegister reg, const Immediate& shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x73); + EmitXmmRegisterOperand(3, reg); + EmitUint8(shift_count.value()); +} + + +void X86Assembler::fldl(const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitOperand(0, src); +} + + +void X86Assembler::fstl(const Address& dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitOperand(2, dst); +} + + +void X86Assembler::fstpl(const Address& dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitOperand(3, dst); +} + + +void X86Assembler::fstsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x9B); + EmitUint8(0xDF); + EmitUint8(0xE0); +} + + +void X86Assembler::fnstcw(const Address& dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(7, dst); +} + + +void X86Assembler::fldcw(const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + 
EmitOperand(5, src); +} + + +void X86Assembler::fistpl(const Address& dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDF); + EmitOperand(7, dst); +} + + +void X86Assembler::fistps(const Address& dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDB); + EmitOperand(3, dst); +} + + +void X86Assembler::fildl(const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDF); + EmitOperand(5, src); +} + + +void X86Assembler::filds(const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDB); + EmitOperand(0, src); +} + + +void X86Assembler::fincstp() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xF7); +} + + +void X86Assembler::ffree(const Immediate& index) { + CHECK_LT(index.value(), 7); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitUint8(0xC0 + index.value()); +} + + +void X86Assembler::fsin() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xFE); +} + + +void X86Assembler::fcos() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xFF); +} + + +void X86Assembler::fptan() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xF2); +} + + +void X86Assembler::fucompp() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDA); + EmitUint8(0xE9); +} + + +void X86Assembler::fprem() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xF8); +} + + +void X86Assembler::xchgl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x87); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::xchgl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x87); + EmitOperand(reg, address); +} + + +void X86Assembler::cmpb(const Address& address, const 
Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x80); + EmitOperand(7, address); + EmitUint8(imm.value() & 0xFF); +} + + +void X86Assembler::cmpw(const Address& address, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitComplex(7, address, imm, /* is_16_op= */ true); +} + + +void X86Assembler::cmpl(Register reg, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(7, Operand(reg), imm); +} + + +void X86Assembler::cmpl(Register reg0, Register reg1) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x3B); + EmitOperand(reg0, Operand(reg1)); +} + + +void X86Assembler::cmpl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x3B); + EmitOperand(reg, address); +} + + +void X86Assembler::addl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x03); + EmitRegisterOperand(dst, src); +} + + +void X86Assembler::addl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x03); + EmitOperand(reg, address); +} + + +void X86Assembler::cmpl(const Address& address, Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x39); + EmitOperand(reg, address); +} + + +void X86Assembler::cmpl(const Address& address, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(7, address, imm); +} + + +void X86Assembler::testl(Register reg1, Register reg2) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x85); + EmitRegisterOperand(reg1, reg2); +} + + +void X86Assembler::testl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x85); + EmitOperand(reg, address); +} + + +void X86Assembler::testl(Register reg, const Immediate& immediate) { + AssemblerBuffer::EnsureCapacity 
ensured(&buffer_); + // For registers that have a byte variant (EAX, EBX, ECX, and EDX) + // we only test the byte register to keep the encoding short. + if (immediate.is_uint8() && reg < 4) { + // Use zero-extended 8-bit immediate. + if (reg == EAX) { + EmitUint8(0xA8); + } else { + EmitUint8(0xF6); + EmitUint8(0xC0 + reg); + } + EmitUint8(immediate.value() & 0xFF); + } else if (reg == EAX) { + // Use short form if the destination is EAX. + EmitUint8(0xA9); + EmitImmediate(immediate); + } else { + EmitUint8(0xF7); + EmitOperand(0, Operand(reg)); + EmitImmediate(immediate); + } +} + + +void X86Assembler::testb(const Address& dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF6); + EmitOperand(EAX, dst); + CHECK(imm.is_int8()); + EmitUint8(imm.value() & 0xFF); +} + + +void X86Assembler::testl(const Address& dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF7); + EmitOperand(0, dst); + EmitImmediate(imm); +} + + +void X86Assembler::andl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x23); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::andl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x23); + EmitOperand(reg, address); +} + + +void X86Assembler::andl(Register dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(4, Operand(dst), imm); +} + + +void X86Assembler::orl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0B); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::orl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0B); + EmitOperand(reg, address); +} + + +void X86Assembler::orl(Register dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(1, 
Operand(dst), imm); +} + + +void X86Assembler::xorl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x33); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::xorl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x33); + EmitOperand(reg, address); +} + + +void X86Assembler::xorl(Register dst, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(6, Operand(dst), imm); +} + + +void X86Assembler::addl(Register reg, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(0, Operand(reg), imm); +} + + +void X86Assembler::addl(const Address& address, Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x01); + EmitOperand(reg, address); +} + + +void X86Assembler::addl(const Address& address, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(0, address, imm); +} + + +void X86Assembler::addw(const Address& address, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_uint16() || imm.is_int16()) << imm.value(); + EmitUint8(0x66); + EmitComplex(0, address, imm, /* is_16_op= */ true); +} + + +void X86Assembler::adcl(Register reg, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(2, Operand(reg), imm); +} + + +void X86Assembler::adcl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x13); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::adcl(Register dst, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x13); + EmitOperand(dst, address); +} + + +void X86Assembler::subl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x2B); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::subl(Register reg, 
const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(5, Operand(reg), imm); +} + + +void X86Assembler::subl(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x2B); + EmitOperand(reg, address); +} + + +void X86Assembler::subl(const Address& address, Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x29); + EmitOperand(reg, address); +} + + +void X86Assembler::cdq() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x99); +} + + +void X86Assembler::idivl(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF7); + EmitUint8(0xF8 | reg); +} + + +void X86Assembler::imull(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xAF); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::imull(Register dst, Register src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // See whether imm can be represented as a sign-extended 8bit value. + int32_t v32 = static_cast(imm.value()); + if (IsInt<8>(v32)) { + // Sign-extension works. + EmitUint8(0x6B); + EmitOperand(dst, Operand(src)); + EmitUint8(static_cast(v32 & 0xFF)); + } else { + // Not representable, use full immediate. 
+ EmitUint8(0x69); + EmitOperand(dst, Operand(src)); + EmitImmediate(imm); + } +} + + +void X86Assembler::imull(Register reg, const Immediate& imm) { + imull(reg, reg, imm); +} + + +void X86Assembler::imull(Register reg, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xAF); + EmitOperand(reg, address); +} + + +void X86Assembler::imull(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF7); + EmitOperand(5, Operand(reg)); +} + + +void X86Assembler::imull(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF7); + EmitOperand(5, address); +} + + +void X86Assembler::mull(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF7); + EmitOperand(4, Operand(reg)); +} + + +void X86Assembler::mull(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF7); + EmitOperand(4, address); +} + + +void X86Assembler::sbbl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x1B); + EmitOperand(dst, Operand(src)); +} + + +void X86Assembler::sbbl(Register reg, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitComplex(3, Operand(reg), imm); +} + + +void X86Assembler::sbbl(Register dst, const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x1B); + EmitOperand(dst, address); +} + + +void X86Assembler::sbbl(const Address& address, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x19); + EmitOperand(src, address); +} + + +void X86Assembler::incl(Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x40 + reg); +} + + +void X86Assembler::incl(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitOperand(0, address); +} + + +void X86Assembler::decl(Register reg) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x48 + reg); +} + + +void X86Assembler::decl(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitOperand(1, address); +} + + +void X86Assembler::shll(Register reg, const Immediate& imm) { + EmitGenericShift(4, Operand(reg), imm); +} + + +void X86Assembler::shll(Register operand, Register shifter) { + EmitGenericShift(4, Operand(operand), shifter); +} + + +void X86Assembler::shll(const Address& address, const Immediate& imm) { + EmitGenericShift(4, address, imm); +} + + +void X86Assembler::shll(const Address& address, Register shifter) { + EmitGenericShift(4, address, shifter); +} + + +void X86Assembler::shrl(Register reg, const Immediate& imm) { + EmitGenericShift(5, Operand(reg), imm); +} + + +void X86Assembler::shrl(Register operand, Register shifter) { + EmitGenericShift(5, Operand(operand), shifter); +} + + +void X86Assembler::shrl(const Address& address, const Immediate& imm) { + EmitGenericShift(5, address, imm); +} + + +void X86Assembler::shrl(const Address& address, Register shifter) { + EmitGenericShift(5, address, shifter); +} + + +void X86Assembler::sarl(Register reg, const Immediate& imm) { + EmitGenericShift(7, Operand(reg), imm); +} + + +void X86Assembler::sarl(Register operand, Register shifter) { + EmitGenericShift(7, Operand(operand), shifter); +} + + +void X86Assembler::sarl(const Address& address, const Immediate& imm) { + EmitGenericShift(7, address, imm); +} + + +void X86Assembler::sarl(const Address& address, Register shifter) { + EmitGenericShift(7, address, shifter); +} + + +void X86Assembler::shld(Register dst, Register src, Register shifter) { + DCHECK_EQ(ECX, shifter); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xA5); + EmitRegisterOperand(src, dst); +} + + +void X86Assembler::shld(Register dst, Register src, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); 
  // (continuation of shld(dst, src, imm): 0x0F 0xA4 /r ib)
  EmitUint8(0x0F);
  EmitUint8(0xA4);
  EmitRegisterOperand(src, dst);
  EmitUint8(imm.value() & 0xFF);
}


// SHRD r/m32, r32, CL (0x0F 0xAD /r); count must be in CL.
void X86Assembler::shrd(Register dst, Register src, Register shifter) {
  DCHECK_EQ(ECX, shifter);
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0xAD);
  EmitRegisterOperand(src, dst);
}


// SHRD r/m32, r32, imm8 (0x0F 0xAC /r ib).
void X86Assembler::shrd(Register dst, Register src, const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x0F);
  EmitUint8(0xAC);
  EmitRegisterOperand(src, dst);
  EmitUint8(imm.value() & 0xFF);
}


// Rotates use the group-2 encoding: /0 = ROL, /1 = ROR.
void X86Assembler::roll(Register reg, const Immediate& imm) {
  EmitGenericShift(0, Operand(reg), imm);
}


void X86Assembler::roll(Register operand, Register shifter) {
  EmitGenericShift(0, Operand(operand), shifter);
}


void X86Assembler::rorl(Register reg, const Immediate& imm) {
  EmitGenericShift(1, Operand(reg), imm);
}


void X86Assembler::rorl(Register operand, Register shifter) {
  EmitGenericShift(1, Operand(operand), shifter);
}


// NEG r32 (0xF7 /3).
void X86Assembler::negl(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitOperand(3, Operand(reg));
}


// NOT r32 (0xF7 /2) — 0xD0 | reg is ModRM mod=11, /2, rm=reg.
void X86Assembler::notl(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF7);
  EmitUint8(0xD0 | reg);
}


// ENTER imm16, 0 (0xC8 iw ib) — frame size, nesting level fixed at 0.
void X86Assembler::enter(const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC8);
  CHECK(imm.is_uint16());
  EmitUint8(imm.value() & 0xFF);
  EmitUint8((imm.value() >> 8) & 0xFF);
  EmitUint8(0x00);
}


// LEAVE (0xC9).
void X86Assembler::leave() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC9);
}


// RET (0xC3).
void X86Assembler::ret() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC3);
}


// RET imm16 (0xC2 iw): return and pop imm16 bytes of arguments.
void X86Assembler::ret(const Immediate& imm) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xC2);
  CHECK(imm.is_uint16());
  EmitUint8(imm.value() & 0xFF);
  EmitUint8((imm.value() >> 8) & 0xFF);
}



// NOP (0x90).
void X86Assembler::nop() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0x90);
}


// INT3 breakpoint (0xCC).
void X86Assembler::int3() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xCC);
}


// HLT (0xF4).
void X86Assembler::hlt() {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xF4);
}


// Conditional jump. Bound labels get a backward displacement (short Jcc rel8
// when it fits, else 0x0F 0x8x rel32); unbound labels always use the long
// form so the link can hold a 32-bit chain position.
void X86Assembler::j(Condition condition, Label* label) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (label->IsBound()) {
    static const int kShortSize = 2;
    static const int kLongSize = 6;
    int offset = label->Position() - buffer_.Size();
    CHECK_LE(offset, 0);  // Bound labels are always behind the current pc.
    if (IsInt<8>(offset - kShortSize)) {
      EmitUint8(0x70 + condition);
      EmitUint8((offset - kShortSize) & 0xFF);
    } else {
      EmitUint8(0x0F);
      EmitUint8(0x80 + condition);
      EmitInt32(offset - kLongSize);
    }
  } else {
    EmitUint8(0x0F);
    EmitUint8(0x80 + condition);
    EmitLabelLink(label);
  }
}


// Conditional jump to a NearLabel: always the 2-byte short form; the target
// must end up within rel8 range (checked when bound).
void X86Assembler::j(Condition condition, NearLabel* label) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (label->IsBound()) {
    static const int kShortSize = 2;
    int offset = label->Position() - buffer_.Size();
    CHECK_LE(offset, 0);
    CHECK(IsInt<8>(offset - kShortSize));
    EmitUint8(0x70 + condition);
    EmitUint8((offset - kShortSize) & 0xFF);
  } else {
    EmitUint8(0x70 + condition);
    EmitLabelLink(label);
  }
}


// JECXZ rel8 (0xE3): jump if ECX is zero; short form only.
void X86Assembler::jecxz(NearLabel* label) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  if (label->IsBound()) {
    static const int kShortSize = 2;
    int offset = label->Position() - buffer_.Size();
    CHECK_LE(offset, 0);
    CHECK(IsInt<8>(offset - kShortSize));
    EmitUint8(0xE3);
    EmitUint8((offset - kShortSize) & 0xFF);
  } else {
    EmitUint8(0xE3);
    EmitLabelLink(label);
  }
}


// Indirect JMP r32 (0xFF /4).
void X86Assembler::jmp(Register reg) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  EmitUint8(0xFF);
  EmitRegisterOperand(4, reg);
}

// Indirect JMP m32 (0xFF /4) — body continues on the next chunk line.
void X86Assembler::jmp(const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xFF); + EmitOperand(4, address); +} + +void X86Assembler::jmp(Label* label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (label->IsBound()) { + static const int kShortSize = 2; + static const int kLongSize = 5; + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + if (IsInt<8>(offset - kShortSize)) { + EmitUint8(0xEB); + EmitUint8((offset - kShortSize) & 0xFF); + } else { + EmitUint8(0xE9); + EmitInt32(offset - kLongSize); + } + } else { + EmitUint8(0xE9); + EmitLabelLink(label); + } +} + + +void X86Assembler::jmp(NearLabel* label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (label->IsBound()) { + static const int kShortSize = 2; + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + CHECK(IsInt<8>(offset - kShortSize)); + EmitUint8(0xEB); + EmitUint8((offset - kShortSize) & 0xFF); + } else { + EmitUint8(0xEB); + EmitLabelLink(label); + } +} + + +void X86Assembler::repne_scasb() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0xAE); +} + + +void X86Assembler::repne_scasw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF2); + EmitUint8(0xAF); +} + + +void X86Assembler::repe_cmpsb() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0xA6); +} + + +void X86Assembler::repe_cmpsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86Assembler::repe_cmpsl() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86Assembler::rep_movsb() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0xA4); +} + + +void X86Assembler::rep_movsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA5); +} + + +X86Assembler* 
X86Assembler::lock() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF0); + return this; +} + + +void X86Assembler::cmpxchgl(const Address& address, Register reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xB1); + EmitOperand(reg, address); +} + + +void X86Assembler::cmpxchg8b(const Address& address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xC7); + EmitOperand(1, address); +} + + +void X86Assembler::mfence() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xAE); + EmitUint8(0xF0); +} + +X86Assembler* X86Assembler::fs() { + // TODO: fs is a prefix and not an instruction + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x64); + return this; +} + +X86Assembler* X86Assembler::gs() { + // TODO: fs is a prefix and not an instruction + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x65); + return this; +} + +void X86Assembler::AddImmediate(Register reg, const Immediate& imm) { + int value = imm.value(); + if (value > 0) { + if (value == 1) { + incl(reg); + } else if (value != 0) { + addl(reg, imm); + } + } else if (value < 0) { + value = -value; + if (value == 1) { + decl(reg); + } else if (value != 0) { + subl(reg, Immediate(value)); + } + } +} + + +void X86Assembler::LoadLongConstant(XmmRegister dst, int64_t value) { + // TODO: Need to have a code constants table. + pushl(Immediate(High32Bits(value))); + pushl(Immediate(Low32Bits(value))); + movsd(dst, Address(ESP, 0)); + addl(ESP, Immediate(2 * sizeof(int32_t))); +} + + +void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) { + // TODO: Need to have a code constants table. + int64_t constant = bit_cast(value); + LoadLongConstant(dst, constant); +} + + +void X86Assembler::Align(int alignment, int offset) { + CHECK(IsPowerOfTwo(alignment)); + // Emit nop instruction until the real position is aligned. 
+ while (((offset + buffer_.GetPosition()) & (alignment-1)) != 0) { + nop(); + } +} + + +void X86Assembler::Bind(Label* label) { + int bound = buffer_.Size(); + CHECK(!label->IsBound()); // Labels can only be bound once. + while (label->IsLinked()) { + int position = label->LinkPosition(); + int next = buffer_.Load(position); + buffer_.Store(position, bound - (position + 4)); + label->position_ = next; + } + label->BindTo(bound); +} + + +void X86Assembler::Bind(NearLabel* label) { + int bound = buffer_.Size(); + CHECK(!label->IsBound()); // Labels can only be bound once. + while (label->IsLinked()) { + int position = label->LinkPosition(); + uint8_t delta = buffer_.Load(position); + int offset = bound - (position + 1); + CHECK(IsInt<8>(offset)); + buffer_.Store(position, offset); + label->position_ = delta != 0u ? label->position_ - delta : 0; + } + label->BindTo(bound); +} + + +void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) { + CHECK_GE(reg_or_opcode, 0); + CHECK_LT(reg_or_opcode, 8); + const int length = operand.length_; + CHECK_GT(length, 0); + // Emit the ModRM byte updated with the given reg value. + CHECK_EQ(operand.encoding_[0] & 0x38, 0); + EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3)); + // Emit the rest of the encoded operand. + for (int i = 1; i < length; i++) { + EmitUint8(operand.encoding_[i]); + } + AssemblerFixup* fixup = operand.GetFixup(); + if (fixup != nullptr) { + EmitFixup(fixup); + } +} + + +void X86Assembler::EmitImmediate(const Immediate& imm, bool is_16_op) { + if (is_16_op) { + EmitUint8(imm.value() & 0xFF); + EmitUint8(imm.value() >> 8); + } else { + EmitInt32(imm.value()); + } +} + + +void X86Assembler::EmitComplex(int reg_or_opcode, + const Operand& operand, + const Immediate& immediate, + bool is_16_op) { + CHECK_GE(reg_or_opcode, 0); + CHECK_LT(reg_or_opcode, 8); + if (immediate.is_int8()) { + // Use sign-extended 8-bit immediate. 
+ EmitUint8(0x83); + EmitOperand(reg_or_opcode, operand); + EmitUint8(immediate.value() & 0xFF); + } else if (operand.IsRegister(EAX)) { + // Use short form if the destination is eax. + EmitUint8(0x05 + (reg_or_opcode << 3)); + EmitImmediate(immediate, is_16_op); + } else { + EmitUint8(0x81); + EmitOperand(reg_or_opcode, operand); + EmitImmediate(immediate, is_16_op); + } +} + + +void X86Assembler::EmitLabel(Label* label, int instruction_size) { + if (label->IsBound()) { + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + EmitInt32(offset - instruction_size); + } else { + EmitLabelLink(label); + } +} + + +void X86Assembler::EmitLabelLink(Label* label) { + CHECK(!label->IsBound()); + int position = buffer_.Size(); + EmitInt32(label->position_); + label->LinkTo(position); +} + + +void X86Assembler::EmitLabelLink(NearLabel* label) { + CHECK(!label->IsBound()); + int position = buffer_.Size(); + if (label->IsLinked()) { + // Save the delta in the byte that we have to play with. + uint32_t delta = position - label->LinkPosition(); + CHECK(IsUint<8>(delta)); + EmitUint8(delta & 0xFF); + } else { + EmitUint8(0); + } + label->LinkTo(position); +} + + +void X86Assembler::EmitGenericShift(int reg_or_opcode, + const Operand& operand, + const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int8()); + if (imm.value() == 1) { + EmitUint8(0xD1); + EmitOperand(reg_or_opcode, operand); + } else { + EmitUint8(0xC1); + EmitOperand(reg_or_opcode, operand); + EmitUint8(imm.value() & 0xFF); + } +} + + +void X86Assembler::EmitGenericShift(int reg_or_opcode, + const Operand& operand, + Register shifter) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK_EQ(shifter, ECX); + EmitUint8(0xD3); + EmitOperand(reg_or_opcode, operand); +} + +void X86Assembler::AddConstantArea() { + ArrayRef area = constant_area_.GetBuffer(); + // Generate the data for the literal area. 
+ for (size_t i = 0, e = area.size(); i < e; i++) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitInt32(area[i]); + } +} + +size_t ConstantArea::AppendInt32(int32_t v) { + size_t result = buffer_.size() * elem_size_; + buffer_.push_back(v); + return result; +} + +size_t ConstantArea::AddInt32(int32_t v) { + for (size_t i = 0, e = buffer_.size(); i < e; i++) { + if (v == buffer_[i]) { + return i * elem_size_; + } + } + + // Didn't match anything. + return AppendInt32(v); +} + +size_t ConstantArea::AddInt64(int64_t v) { + int32_t v_low = Low32Bits(v); + int32_t v_high = High32Bits(v); + if (buffer_.size() > 1) { + // Ensure we don't pass the end of the buffer. + for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) { + if (v_low == buffer_[i] && v_high == buffer_[i + 1]) { + return i * elem_size_; + } + } + } + + // Didn't match anything. + size_t result = buffer_.size() * elem_size_; + buffer_.push_back(v_low); + buffer_.push_back(v_high); + return result; +} + +size_t ConstantArea::AddDouble(double v) { + // Treat the value as a 64-bit integer value. + return AddInt64(bit_cast(v)); +} + +size_t ConstantArea::AddFloat(float v) { + // Treat the value as a 32-bit integer value. 
+ return AddInt32(bit_cast(v)); +} + +} // namespace x86 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/assembler/x86/assembler_x86.h b/module/src/main/cpp/whale/src/assembler/x86/assembler_x86.h new file mode 100644 index 00000000..f810fc76 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86/assembler_x86.h @@ -0,0 +1,1100 @@ +#ifndef WHALE_ASSEMBLER_ASSEMBLER_X86_H_ +#define WHALE_ASSEMBLER_ASSEMBLER_X86_H_ + +#include +#include +#include "assembler/label.h" +#include "base/bit_utils.h" +#include "base/offsets.h" +#include "assembler/x86/registers_x86.h" +#include "assembler/x86/constants_x86.h" +#include "assembler/x86/managed_register_x86.h" +#include "assembler/assembler.h" + + +namespace whale { +namespace x86 { + +// If true, references within the heap are poisoned (negated). +#ifdef USE_HEAP_POISONING +static constexpr bool kPoisonHeapReferences = true; +#else +static constexpr bool kPoisonHeapReferences = false; +#endif + +class Immediate : public ValueObject { + public: + explicit Immediate(int32_t value_in) : value_(value_in) {} + + int32_t value() const { return value_; } + + bool is_int8() const { return IsInt<8>(value_); } + + bool is_uint8() const { return IsUint<8>(value_); } + + bool is_int16() const { return IsInt<16>(value_); } + + bool is_uint16() const { return IsUint<16>(value_); } + + private: + const int32_t value_; +}; + + +class Operand : public ValueObject { + public: + uint8_t mod() const { + return (encoding_at(0) >> 6) & 3; + } + + Register rm() const { + return static_cast(encoding_at(0) & 7); + } + + ScaleFactor scale() const { + return static_cast((encoding_at(1) >> 6) & 3); + } + + Register index() const { + return static_cast((encoding_at(1) >> 3) & 7); + } + + Register base() const { + return static_cast(encoding_at(1) & 7); + } + + int8_t disp8() const { + CHECK_GE(length_, 2); + return static_cast(encoding_[length_ - 1]); + } + + int32_t disp32() const { + CHECK_GE(length_, 5); + int32_t value; + 
memcpy(&value, &encoding_[length_ - 4], sizeof(value)); + return value; + } + + bool IsRegister(Register reg) const { + return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only. + && ((encoding_[0] & 0x07) == reg); // Register codes match. + } + + protected: + // Operand can be sub classed (e.g: Address). + Operand() : length_(0), fixup_(nullptr) {} + + void SetModRM(int mod_in, Register rm_in) { + CHECK_EQ(mod_in & ~3, 0); + encoding_[0] = (mod_in << 6) | rm_in; + length_ = 1; + } + + void SetSIB(ScaleFactor scale_in, Register index_in, Register base_in) { + CHECK_EQ(length_, 1); + CHECK_EQ(scale_in & ~3, 0); + encoding_[1] = (scale_in << 6) | (index_in << 3) | base_in; + length_ = 2; + } + + void SetDisp8(int8_t disp) { + CHECK(length_ == 1 || length_ == 2); + encoding_[length_++] = static_cast(disp); + } + + void SetDisp32(int32_t disp) { + CHECK(length_ == 1 || length_ == 2); + int disp_size = sizeof(disp); + memmove(&encoding_[length_], &disp, disp_size); + length_ += disp_size; + } + + AssemblerFixup *GetFixup() const { + return fixup_; + } + + void SetFixup(AssemblerFixup *fixup) { + fixup_ = fixup; + } + + private: + uint8_t length_; + uint8_t encoding_[6]; + + // A fixup can be associated with the operand, in order to be applied after the + // code has been generated. This is used for constant area fixups. + AssemblerFixup *fixup_; + + explicit Operand(Register reg) : fixup_(nullptr) { SetModRM(3, reg); } + + // Get the operand encoding byte at the given index. 
+ uint8_t encoding_at(int index_in) const { + return encoding_[index_in]; + } + + friend class X86Assembler; +}; + + +class Address : public Operand { + public: + Address(Register base_in, int32_t disp) { + Init(base_in, disp); + } + + Address(Register base_in, int32_t disp, AssemblerFixup *fixup) { + Init(base_in, disp); + SetFixup(fixup); + } + + Address(Register base_in, Offset disp) { + Init(base_in, disp.Int32Value()); + } + + Address(Register base_in, FrameOffset disp) { + CHECK_EQ(base_in, ESP); + Init(ESP, disp.Int32Value()); + } + + Address(Register base_in, MemberOffset disp) { + Init(base_in, disp.Int32Value()); + } + + Address(Register index_in, ScaleFactor scale_in, int32_t disp) { + CHECK_NE(index_in, ESP); // Illegal addressing mode. + SetModRM(0, ESP); + SetSIB(scale_in, index_in, EBP); + SetDisp32(disp); + } + + Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) { + Init(base_in, index_in, scale_in, disp); + } + + Address(Register base_in, + Register index_in, + ScaleFactor scale_in, + int32_t disp, AssemblerFixup *fixup) { + Init(base_in, index_in, scale_in, disp); + SetFixup(fixup); + } + + static Address Absolute(uintptr_t addr) { + Address result; + result.SetModRM(0, EBP); + result.SetDisp32(addr); + return result; + } + + static Address Absolute(ThreadOffset32 addr) { + return Absolute(addr.Int32Value()); + } + + private: + Address() {} + + void Init(Register base_in, int32_t disp) { + if (disp == 0 && base_in != EBP) { + SetModRM(0, base_in); + if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in); + } else if (disp >= -128 && disp <= 127) { + SetModRM(1, base_in); + if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in); + SetDisp8(disp); + } else { + SetModRM(2, base_in); + if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in); + SetDisp32(disp); + } + } + + void Init(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) { + if (disp == 0 && base_in != EBP) { + SetModRM(0, ESP); + SetSIB(scale_in, 
index_in, base_in); + } else if (disp >= -128 && disp <= 127) { + SetModRM(1, ESP); + SetSIB(scale_in, index_in, base_in); + SetDisp8(disp); + } else { + SetModRM(2, ESP); + SetSIB(scale_in, index_in, base_in); + SetDisp32(disp); + } + } +}; + +// This is equivalent to the Label class, used in a slightly different context. We +// inherit the functionality of the Label class, but prevent unintended +// derived-to-base conversions by making the base class private. +class NearLabel : private Label { + public: + NearLabel() : Label() {} + + // Expose the Label routines that we need. + using Label::Position; + using Label::LinkPosition; + using Label::IsBound; + using Label::IsUnused; + using Label::IsLinked; + + private: + using Label::BindTo; + using Label::LinkTo; + + friend class x86::X86Assembler; + + DISALLOW_COPY_AND_ASSIGN(NearLabel); +}; + +/** + * Class to handle constant area values. + */ +class ConstantArea { + public: + explicit ConstantArea() + : buffer_() {} + + // Add a double to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddDouble(double v); + + // Add a float to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddFloat(float v); + + // Add an int32_t to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddInt32(int32_t v); + + // Add an int32_t to the end of the constant area, returning the offset into + // the constant area where the literal resides. + size_t AppendInt32(int32_t v); + + // Add an int64_t to the constant area, returning the offset into + // the constant area where the literal resides. 
+ size_t AddInt64(int64_t v); + + bool IsEmpty() const { + return buffer_.size() == 0; + } + + size_t GetSize() const { + return buffer_.size() * elem_size_; + } + + ArrayRef GetBuffer() const { + return ArrayRef(buffer_); + } + + private: + static constexpr size_t elem_size_ = sizeof(int32_t); + std::vector buffer_; +}; + +class X86Assembler final : public Assembler { + public: + explicit X86Assembler() : Assembler(), constant_area_() {} + + virtual ~X86Assembler() {} + + /* + * Emit Machine Instructions. + */ + void call(Register reg); + + void call(const Address &address); + + void call(Label *label); + + void call(const ExternalLabel &label); + + void pushl(Register reg); + + void pushl(const Address &address); + + void pushl(const Immediate &imm); + + void popl(Register reg); + + void popl(const Address &address); + + void movl(Register dst, const Immediate &src); + + void movl(Register dst, Register src); + + void movl(Register dst, const Address &src); + + void movl(const Address &dst, Register src); + + void movl(const Address &dst, const Immediate &imm); + + void movl(const Address &dst, Label *lbl); + + void movntl(const Address &dst, Register src); + + void blsi(Register dst, Register src); // no addr variant (for now) + void blsmsk(Register dst, Register src); // no addr variant (for now) + void blsr(Register dst, Register src); // no addr varianr (for now) + + void bswapl(Register dst); + + void bsfl(Register dst, Register src); + + void bsfl(Register dst, const Address &src); + + void bsrl(Register dst, Register src); + + void bsrl(Register dst, const Address &src); + + void popcntl(Register dst, Register src); + + void popcntl(Register dst, const Address &src); + + void rorl(Register reg, const Immediate &imm); + + void rorl(Register operand, Register shifter); + + void roll(Register reg, const Immediate &imm); + + void roll(Register operand, Register shifter); + + void movzxb(Register dst, ByteRegister src); + + void movzxb(Register dst, const 
Address &src); + + void movsxb(Register dst, ByteRegister src); + + void movsxb(Register dst, const Address &src); + + void movb(Register dst, const Address &src); + + void movb(const Address &dst, ByteRegister src); + + void movb(const Address &dst, const Immediate &imm); + + void movzxw(Register dst, Register src); + + void movzxw(Register dst, const Address &src); + + void movsxw(Register dst, Register src); + + void movsxw(Register dst, const Address &src); + + void movw(Register dst, const Address &src); + + void movw(const Address &dst, Register src); + + void movw(const Address &dst, const Immediate &imm); + + void leal(Register dst, const Address &src); + + void cmovl(Condition condition, Register dst, Register src); + + void cmovl(Condition condition, Register dst, const Address &src); + + void setb(Condition condition, Register dst); + + void movaps(XmmRegister dst, XmmRegister src); // move + void movaps(XmmRegister dst, const Address &src); // load aligned + void movups(XmmRegister dst, const Address &src); // load unaligned + void movaps(const Address &dst, XmmRegister src); // store aligned + void movups(const Address &dst, XmmRegister src); // store unaligned + + void movss(XmmRegister dst, const Address &src); + + void movss(const Address &dst, XmmRegister src); + + void movss(XmmRegister dst, XmmRegister src); + + void movd(XmmRegister dst, Register src); + + void movd(Register dst, XmmRegister src); + + void addss(XmmRegister dst, XmmRegister src); + + void addss(XmmRegister dst, const Address &src); + + void subss(XmmRegister dst, XmmRegister src); + + void subss(XmmRegister dst, const Address &src); + + void mulss(XmmRegister dst, XmmRegister src); + + void mulss(XmmRegister dst, const Address &src); + + void divss(XmmRegister dst, XmmRegister src); + + void divss(XmmRegister dst, const Address &src); + + void addps(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void subps(XmmRegister dst, XmmRegister src); + + void 
mulps(XmmRegister dst, XmmRegister src); + + void divps(XmmRegister dst, XmmRegister src); + + void movapd(XmmRegister dst, XmmRegister src); // move + void movapd(XmmRegister dst, const Address &src); // load aligned + void movupd(XmmRegister dst, const Address &src); // load unaligned + void movapd(const Address &dst, XmmRegister src); // store aligned + void movupd(const Address &dst, XmmRegister src); // store unaligned + + void movsd(XmmRegister dst, const Address &src); + + void movsd(const Address &dst, XmmRegister src); + + void movsd(XmmRegister dst, XmmRegister src); + + void movhpd(XmmRegister dst, const Address &src); + + void movhpd(const Address &dst, XmmRegister src); + + void addsd(XmmRegister dst, XmmRegister src); + + void addsd(XmmRegister dst, const Address &src); + + void subsd(XmmRegister dst, XmmRegister src); + + void subsd(XmmRegister dst, const Address &src); + + void mulsd(XmmRegister dst, XmmRegister src); + + void mulsd(XmmRegister dst, const Address &src); + + void divsd(XmmRegister dst, XmmRegister src); + + void divsd(XmmRegister dst, const Address &src); + + void addpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void subpd(XmmRegister dst, XmmRegister src); + + void mulpd(XmmRegister dst, XmmRegister src); + + void divpd(XmmRegister dst, XmmRegister src); + + void movdqa(XmmRegister dst, XmmRegister src); // move + void movdqa(XmmRegister dst, const Address &src); // load aligned + void movdqu(XmmRegister dst, const Address &src); // load unaligned + void movdqa(const Address &dst, XmmRegister src); // store aligned + void movdqu(const Address &dst, XmmRegister src); // store unaligned + + void paddb(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void psubb(XmmRegister dst, XmmRegister src); + + void paddw(XmmRegister dst, XmmRegister src); + + void psubw(XmmRegister dst, XmmRegister src); + + void pmullw(XmmRegister dst, XmmRegister src); + + void paddd(XmmRegister dst, XmmRegister src); + + 
void psubd(XmmRegister dst, XmmRegister src); + + void pmulld(XmmRegister dst, XmmRegister src); + + void paddq(XmmRegister dst, XmmRegister src); + + void psubq(XmmRegister dst, XmmRegister src); + + void paddusb(XmmRegister dst, XmmRegister src); + + void paddsb(XmmRegister dst, XmmRegister src); + + void paddusw(XmmRegister dst, XmmRegister src); + + void paddsw(XmmRegister dst, XmmRegister src); + + void psubusb(XmmRegister dst, XmmRegister src); + + void psubsb(XmmRegister dst, XmmRegister src); + + void psubusw(XmmRegister dst, XmmRegister src); + + void psubsw(XmmRegister dst, XmmRegister src); + + void cvtsi2ss(XmmRegister dst, Register src); + + void cvtsi2sd(XmmRegister dst, Register src); + + void cvtss2si(Register dst, XmmRegister src); + + void cvtss2sd(XmmRegister dst, XmmRegister src); + + void cvtsd2si(Register dst, XmmRegister src); + + void cvtsd2ss(XmmRegister dst, XmmRegister src); + + void cvttss2si(Register dst, XmmRegister src); + + void cvttsd2si(Register dst, XmmRegister src); + + void cvtdq2ps(XmmRegister dst, XmmRegister src); + + void cvtdq2pd(XmmRegister dst, XmmRegister src); + + void comiss(XmmRegister a, XmmRegister b); + + void comiss(XmmRegister a, const Address &b); + + void comisd(XmmRegister a, XmmRegister b); + + void comisd(XmmRegister a, const Address &b); + + void ucomiss(XmmRegister a, XmmRegister b); + + void ucomiss(XmmRegister a, const Address &b); + + void ucomisd(XmmRegister a, XmmRegister b); + + void ucomisd(XmmRegister a, const Address &b); + + void roundsd(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void roundss(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void sqrtsd(XmmRegister dst, XmmRegister src); + + void sqrtss(XmmRegister dst, XmmRegister src); + + void xorpd(XmmRegister dst, const Address &src); + + void xorpd(XmmRegister dst, XmmRegister src); + + void xorps(XmmRegister dst, const Address &src); + + void xorps(XmmRegister dst, XmmRegister src); + + void pxor(XmmRegister 
dst, XmmRegister src); // no addr variant (for now) + + void andpd(XmmRegister dst, XmmRegister src); + + void andpd(XmmRegister dst, const Address &src); + + void andps(XmmRegister dst, XmmRegister src); + + void andps(XmmRegister dst, const Address &src); + + void pand(XmmRegister dst, XmmRegister src); // no addr variant (for now) + + void andn(Register dst, Register src1, Register src2); // no addr variant (for now) + void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void andnps(XmmRegister dst, XmmRegister src); + + void pandn(XmmRegister dst, XmmRegister src); + + void orpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void orps(XmmRegister dst, XmmRegister src); + + void por(XmmRegister dst, XmmRegister src); + + void pavgb(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void pavgw(XmmRegister dst, XmmRegister src); + + void psadbw(XmmRegister dst, XmmRegister src); + + void pmaddwd(XmmRegister dst, XmmRegister src); + + void phaddw(XmmRegister dst, XmmRegister src); + + void phaddd(XmmRegister dst, XmmRegister src); + + void haddps(XmmRegister dst, XmmRegister src); + + void haddpd(XmmRegister dst, XmmRegister src); + + void phsubw(XmmRegister dst, XmmRegister src); + + void phsubd(XmmRegister dst, XmmRegister src); + + void hsubps(XmmRegister dst, XmmRegister src); + + void hsubpd(XmmRegister dst, XmmRegister src); + + void pminsb(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void pmaxsb(XmmRegister dst, XmmRegister src); + + void pminsw(XmmRegister dst, XmmRegister src); + + void pmaxsw(XmmRegister dst, XmmRegister src); + + void pminsd(XmmRegister dst, XmmRegister src); + + void pmaxsd(XmmRegister dst, XmmRegister src); + + void pminub(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void pmaxub(XmmRegister dst, XmmRegister src); + + void pminuw(XmmRegister dst, XmmRegister src); + + void pmaxuw(XmmRegister dst, XmmRegister src); + + void 
pminud(XmmRegister dst, XmmRegister src); + + void pmaxud(XmmRegister dst, XmmRegister src); + + void minps(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void maxps(XmmRegister dst, XmmRegister src); + + void minpd(XmmRegister dst, XmmRegister src); + + void maxpd(XmmRegister dst, XmmRegister src); + + void pcmpeqb(XmmRegister dst, XmmRegister src); + + void pcmpeqw(XmmRegister dst, XmmRegister src); + + void pcmpeqd(XmmRegister dst, XmmRegister src); + + void pcmpeqq(XmmRegister dst, XmmRegister src); + + void pcmpgtb(XmmRegister dst, XmmRegister src); + + void pcmpgtw(XmmRegister dst, XmmRegister src); + + void pcmpgtd(XmmRegister dst, XmmRegister src); + + void pcmpgtq(XmmRegister dst, XmmRegister src); // SSE4.2 + + void shufpd(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void shufps(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void pshufd(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void punpcklbw(XmmRegister dst, XmmRegister src); + + void punpcklwd(XmmRegister dst, XmmRegister src); + + void punpckldq(XmmRegister dst, XmmRegister src); + + void punpcklqdq(XmmRegister dst, XmmRegister src); + + void punpckhbw(XmmRegister dst, XmmRegister src); + + void punpckhwd(XmmRegister dst, XmmRegister src); + + void punpckhdq(XmmRegister dst, XmmRegister src); + + void punpckhqdq(XmmRegister dst, XmmRegister src); + + void psllw(XmmRegister reg, const Immediate &shift_count); + + void pslld(XmmRegister reg, const Immediate &shift_count); + + void psllq(XmmRegister reg, const Immediate &shift_count); + + void psraw(XmmRegister reg, const Immediate &shift_count); + + void psrad(XmmRegister reg, const Immediate &shift_count); + // no psraq + + void psrlw(XmmRegister reg, const Immediate &shift_count); + + void psrld(XmmRegister reg, const Immediate &shift_count); + + void psrlq(XmmRegister reg, const Immediate &shift_count); + + void psrldq(XmmRegister reg, const Immediate &shift_count); + + void 
flds(const Address &src); + + void fstps(const Address &dst); + + void fsts(const Address &dst); + + void fldl(const Address &src); + + void fstpl(const Address &dst); + + void fstl(const Address &dst); + + void fstsw(); + + void fucompp(); + + void fnstcw(const Address &dst); + + void fldcw(const Address &src); + + void fistpl(const Address &dst); + + void fistps(const Address &dst); + + void fildl(const Address &src); + + void filds(const Address &src); + + void fincstp(); + + void ffree(const Immediate &index); + + void fsin(); + + void fcos(); + + void fptan(); + + void fprem(); + + void xchgl(Register dst, Register src); + + void xchgl(Register reg, const Address &address); + + void cmpb(const Address &address, const Immediate &imm); + + void cmpw(const Address &address, const Immediate &imm); + + void cmpl(Register reg, const Immediate &imm); + + void cmpl(Register reg0, Register reg1); + + void cmpl(Register reg, const Address &address); + + void cmpl(const Address &address, Register reg); + + void cmpl(const Address &address, const Immediate &imm); + + void testl(Register reg1, Register reg2); + + void testl(Register reg, const Immediate &imm); + + void testl(Register reg1, const Address &address); + + void testb(const Address &dst, const Immediate &imm); + + void testl(const Address &dst, const Immediate &imm); + + void andl(Register dst, const Immediate &imm); + + void andl(Register dst, Register src); + + void andl(Register dst, const Address &address); + + void orl(Register dst, const Immediate &imm); + + void orl(Register dst, Register src); + + void orl(Register dst, const Address &address); + + void xorl(Register dst, Register src); + + void xorl(Register dst, const Immediate &imm); + + void xorl(Register dst, const Address &address); + + void addl(Register dst, Register src); + + void addl(Register reg, const Immediate &imm); + + void addl(Register reg, const Address &address); + + void addl(const Address &address, Register reg); + + void addl(const 
Address &address, const Immediate &imm); + + void addw(const Address &address, const Immediate &imm); + + void adcl(Register dst, Register src); + + void adcl(Register reg, const Immediate &imm); + + void adcl(Register dst, const Address &address); + + void subl(Register dst, Register src); + + void subl(Register reg, const Immediate &imm); + + void subl(Register reg, const Address &address); + + void subl(const Address &address, Register src); + + void cdq(); + + void idivl(Register reg); + + void imull(Register dst, Register src); + + void imull(Register reg, const Immediate &imm); + + void imull(Register dst, Register src, const Immediate &imm); + + void imull(Register reg, const Address &address); + + void imull(Register reg); + + void imull(const Address &address); + + void mull(Register reg); + + void mull(const Address &address); + + void sbbl(Register dst, Register src); + + void sbbl(Register reg, const Immediate &imm); + + void sbbl(Register reg, const Address &address); + + void sbbl(const Address &address, Register src); + + void incl(Register reg); + + void incl(const Address &address); + + void decl(Register reg); + + void decl(const Address &address); + + void shll(Register reg, const Immediate &imm); + + void shll(Register operand, Register shifter); + + void shll(const Address &address, const Immediate &imm); + + void shll(const Address &address, Register shifter); + + void shrl(Register reg, const Immediate &imm); + + void shrl(Register operand, Register shifter); + + void shrl(const Address &address, const Immediate &imm); + + void shrl(const Address &address, Register shifter); + + void sarl(Register reg, const Immediate &imm); + + void sarl(Register operand, Register shifter); + + void sarl(const Address &address, const Immediate &imm); + + void sarl(const Address &address, Register shifter); + + void shld(Register dst, Register src, Register shifter); + + void shld(Register dst, Register src, const Immediate &imm); + + void shrd(Register dst, 
Register src, Register shifter); + + void shrd(Register dst, Register src, const Immediate &imm); + + void negl(Register reg); + + void notl(Register reg); + + void enter(const Immediate &imm); + + void leave(); + + void ret(); + + void ret(const Immediate &imm); + + void nop(); + + void int3(); + + void hlt(); + + void j(Condition condition, Label *label); + + void j(Condition condition, NearLabel *label); + + void jecxz(NearLabel *label); + + void jmp(Register reg); + + void jmp(const Address &address); + + void jmp(Label *label); + + void jmp(NearLabel *label); + + void repne_scasb(); + + void repne_scasw(); + + void repe_cmpsb(); + + void repe_cmpsw(); + + void repe_cmpsl(); + + void rep_movsb(); + + void rep_movsw(); + + X86Assembler *lock(); + + void cmpxchgl(const Address &address, Register reg); + + void cmpxchg8b(const Address &address); + + void mfence(); + + X86Assembler *fs(); + + X86Assembler *gs(); + + // + // Macros for High-level operations. + // + + void AddImmediate(Register reg, const Immediate &imm); + + void LoadLongConstant(XmmRegister dst, int64_t value); + + void LoadDoubleConstant(XmmRegister dst, double value); + + void LockCmpxchgl(const Address &address, Register reg) { + lock()->cmpxchgl(address, reg); + } + + void LockCmpxchg8b(const Address &address) { + lock()->cmpxchg8b(address); + } + + // + // Misc. functionality + // + int PreferredLoopAlignment() { return 16; } + + void Align(int alignment, int offset); + + void Bind(Label *label) override; + + void Jump(Label *label) override { + jmp(label); + } + + void Bind(NearLabel *label); + + // + // Heap poisoning. + // + + // Poison a heap reference contained in `reg`. + void PoisonHeapReference(Register reg) { negl(reg); } + + // Unpoison a heap reference contained in `reg`. + void UnpoisonHeapReference(Register reg) { negl(reg); } + + // Poison a heap reference contained in `reg` if heap poisoning is enabled. 
+ void MaybePoisonHeapReference(Register reg) { + if (kPoisonHeapReferences) { + PoisonHeapReference(reg); + } + } + + // Unpoison a heap reference contained in `reg` if heap poisoning is enabled. + void MaybeUnpoisonHeapReference(Register reg) { + if (kPoisonHeapReferences) { + UnpoisonHeapReference(reg); + } + } + + // Add a double to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddDouble(double v) { return constant_area_.AddDouble(v); } + + // Add a float to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddFloat(float v) { return constant_area_.AddFloat(v); } + + // Add an int32_t to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddInt32(int32_t v) { + return constant_area_.AddInt32(v); + } + + // Add an int32_t to the end of the constant area, returning the offset into + // the constant area where the literal resides. + size_t AppendInt32(int32_t v) { + return constant_area_.AppendInt32(v); + } + + // Add an int64_t to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); } + + // Add the contents of the constant area to the assembler buffer. + void AddConstantArea(); + + // Is the constant area empty? Return true if there are no literals in the constant area. + bool IsConstantAreaEmpty() const { return constant_area_.IsEmpty(); } + + // Return the current size of the constant area. 
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); } + + private: + inline void EmitUint8(uint8_t value); + + inline void EmitInt32(int32_t value); + + inline void EmitRegisterOperand(int rm, int reg); + + inline void EmitXmmRegisterOperand(int rm, XmmRegister reg); + + inline void EmitFixup(AssemblerFixup *fixup); + + inline void EmitOperandSizeOverride(); + + void EmitOperand(int rm, const Operand &operand); + + void EmitImmediate(const Immediate &imm, bool is_16_op = false); + + void EmitComplex( + int rm, const Operand &operand, const Immediate &immediate, bool is_16_op = false); + + void EmitLabel(Label *label, int instruction_size); + + void EmitLabelLink(Label *label); + + void EmitLabelLink(NearLabel *label); + + void EmitGenericShift(int rm, const Operand &operand, const Immediate &imm); + + void EmitGenericShift(int rm, const Operand &operand, Register shifter); + + // Emit a 3 byte VEX Prefix + uint8_t EmitVexByteZero(bool is_two_byte); + + uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm); + + uint8_t EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp); + + ConstantArea constant_area_; + + DISALLOW_COPY_AND_ASSIGN(X86Assembler); +}; + +inline void X86Assembler::EmitUint8(uint8_t value) { + buffer_.Emit(value); +} + +inline void X86Assembler::EmitInt32(int32_t value) { + buffer_.Emit(value); +} + +inline void X86Assembler::EmitRegisterOperand(int rm, int reg) { + CHECK_GE(rm, 0); + CHECK_LT(rm, 8); + buffer_.Emit(0xC0 + (rm << 3) + reg); +} + +inline void X86Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) { + EmitRegisterOperand(rm, static_cast(reg)); +} + +inline void X86Assembler::EmitFixup(AssemblerFixup *fixup) { + buffer_.EmitFixup(fixup); +} + +inline void X86Assembler::EmitOperandSizeOverride() { + EmitUint8(0x66); +} + +} // namespace x86 +} // namespace whale + +#endif // WHALE_ASSEMBLER_ASSEMBLER_X86_H_ diff --git a/module/src/main/cpp/whale/src/assembler/x86/constants_x86.h 
#ifndef WHALE_ASSEMBLER_CONSTANTS_X86_H_
#define WHALE_ASSEMBLER_CONSTANTS_X86_H_

// Restored: the include target (<stdint.h>) was lost in extraction.
#include <stdint.h>

namespace whale {
namespace x86 {


// 8-bit general purpose registers, numbered as they are encoded in the
// ModR/M "reg" field for byte-sized operands.
enum ByteRegister {
    AL = 0,
    CL = 1,
    DL = 2,
    BL = 3,
    AH = 4,
    CH = 5,
    DH = 6,
    BH = 7,
    kNoByteRegister = -1  // Signals an illegal register.
};

// x87 FPU stack registers ST(0)..ST(7).
enum X87Register {
    ST0 = 0,
    ST1 = 1,
    ST2 = 2,
    ST3 = 3,
    ST4 = 4,
    ST5 = 5,
    ST6 = 6,
    ST7 = 7,
    kNumberOfX87Registers = 8,
    kNoX87Register = -1  // Signals an illegal register.
};

// SIB-byte scale field: the index register is multiplied by 2^value.
enum ScaleFactor {
    TIMES_1 = 0,
    TIMES_2 = 1,
    TIMES_4 = 2,
    TIMES_8 = 3
};

// Condition codes, numerically equal to the low nibble of the matching
// Jcc / SETcc / CMOVcc opcode encodings.
enum Condition {
    kOverflow = 0,
    kNoOverflow = 1,
    kBelow = 2,
    kAboveEqual = 3,
    kEqual = 4,
    kNotEqual = 5,
    kBelowEqual = 6,
    kAbove = 7,
    kSign = 8,
    kNotSign = 9,
    kParityEven = 10,
    kParityOdd = 11,
    kLess = 12,
    kGreaterEqual = 13,
    kLessEqual = 14,
    kGreater = 15,

    // Synonyms used by callers that think in terms of flags rather than
    // comparison results.
    kZero = kEqual,
    kNotZero = kNotEqual,
    kNegative = kSign,
    kPositive = kNotSign,
    kCarrySet = kBelow,
    kCarryClear = kAboveEqual,
    kUnordered = kParityEven
};


// Thin, non-instantiable view over raw instruction bytes in a code stream.
class Instr {
 public:
    static const uint8_t kHltInstruction = 0xF4;
    // We prefer not to use the int3 instruction since it conflicts with gdb.
    static const uint8_t kBreakPointInstruction = kHltInstruction;

    bool IsBreakPoint() {
        // Restored cast target: read the first opcode byte of this instruction
        // (the extracted text had lost the <const uint8_t *> template argument).
        return (*reinterpret_cast<const uint8_t *>(this)) == kBreakPointInstruction;
    }

    // Instructions are read out of a code stream. The only way to get a
    // reference to an instruction is to convert a pointer. There is no way
    // to allocate or create instances of class Instr.
    // Use the At(pc) function to create references to Instr.
    static Instr *At(uintptr_t pc) { return reinterpret_cast<Instr *>(pc); }

 private:
    // Spelled-out equivalent of DISALLOW_IMPLICIT_CONSTRUCTORS(Instr) so this
    // header does not depend on base/macros.h.
    Instr() = delete;
    Instr(const Instr &) = delete;
    Instr &operator=(const Instr &) = delete;
};

}  // namespace x86
}  // namespace whale

#endif  // WHALE_ASSEMBLER_CONSTANTS_X86_H_
+ Register low; + Register high; +}; + + +static const RegisterPairDescriptor kRegisterPairs[] = { +#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high }, + REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION) +#undef REGISTER_PAIR_ENUMERATION +}; + + +bool X86ManagedRegister::Overlaps(const X86ManagedRegister &other) const { + if (IsNoRegister() || other.IsNoRegister()) return false; + CHECK(IsValidManagedRegister()); + CHECK(other.IsValidManagedRegister()); + if (Equals(other)) return true; + if (IsRegisterPair()) { + Register low = AsRegisterPairLow(); + Register high = AsRegisterPairHigh(); + return X86ManagedRegister::FromCpuRegister(low).Overlaps(other) || + X86ManagedRegister::FromCpuRegister(high).Overlaps(other); + } + if (other.IsRegisterPair()) { + return other.Overlaps(*this); + } + return false; +} + + +int X86ManagedRegister::AllocIdLow() const { + const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds); + return kRegisterPairs[r].low; +} + + +int X86ManagedRegister::AllocIdHigh() const { + const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds); + return kRegisterPairs[r].high; +} + + +} // namespace x86 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/assembler/x86/managed_register_x86.h b/module/src/main/cpp/whale/src/assembler/x86/managed_register_x86.h new file mode 100644 index 00000000..8b798595 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86/managed_register_x86.h @@ -0,0 +1,179 @@ +#ifndef WHALE_ASSEMBLER_MANAGED_REGISTER_X86_H_ +#define WHALE_ASSEMBLER_MANAGED_REGISTER_X86_H_ + +#include +#include "assembler/x86/registers_x86.h" +#include "assembler/x86/constants_x86.h" +#include "assembler/managed_register.h" +#include "base/logging.h" + +namespace whale { +namespace x86 { + +enum RegisterPair { + EAX_EDX = 0, + EAX_ECX = 1, + EAX_EBX = 2, + EAX_EDI = 3, + EDX_ECX = 4, + EDX_EBX = 5, + EDX_EDI = 6, + ECX_EBX = 7, + ECX_EDI = 8, + 
EBX_EDI = 9, + ECX_EDX = 10, + kNumberOfRegisterPairs = 11, + kNoRegisterPair = -1, +}; + +const int kNumberOfCpuRegIds = kNumberOfCpuRegisters; +const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters; + +const int kNumberOfXmmRegIds = kNumberOfXmmRegisters; +const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters; + +const int kNumberOfX87RegIds = kNumberOfX87Registers; +const int kNumberOfX87AllocIds = kNumberOfX87Registers; + +const int kNumberOfPairRegIds = kNumberOfRegisterPairs; + +const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds + kNumberOfPairRegIds; +const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds + + kNumberOfX87RegIds; + +// Register ids map: +// [0..R[ cpu registers (enum Register) +// [R..X[ xmm registers (enum XmmRegister) +// [X..S[ x87 registers (enum X87Register) +// [S..P[ register pairs (enum RegisterPair) +// where +// R = kNumberOfCpuRegIds +// X = R + kNumberOfXmmRegIds +// S = X + kNumberOfX87RegIds +// P = X + kNumberOfRegisterPairs + +// Allocation ids map: +// [0..R[ cpu registers (enum Register) +// [R..X[ xmm registers (enum XmmRegister) +// [X..S[ x87 registers (enum X87Register) +// where +// R = kNumberOfCpuRegIds +// X = R + kNumberOfXmmRegIds +// S = X + kNumberOfX87RegIds + + +// An instance of class 'ManagedRegister' represents a single cpu register (enum +// Register), an xmm register (enum XmmRegister), or a pair of cpu registers +// (enum RegisterPair). +// 'ManagedRegister::NoRegister()' provides an invalid register. +// There is a one-to-one mapping between ManagedRegister and register id. 
+class X86ManagedRegister : public ManagedRegister { + public: + constexpr ByteRegister AsByteRegister() const { + return static_cast(id_); + } + + constexpr Register AsCpuRegister() const { + return static_cast(id_); + } + + constexpr XmmRegister AsXmmRegister() const { + return static_cast(id_ - kNumberOfCpuRegIds); + } + + constexpr X87Register AsX87Register() const { + return static_cast(id_ - + (kNumberOfCpuRegIds + kNumberOfXmmRegIds)); + } + + constexpr Register AsRegisterPairLow() const { + // Appropriate mapping of register ids allows to use AllocIdLow(). + return FromRegId(AllocIdLow()).AsCpuRegister(); + } + + constexpr Register AsRegisterPairHigh() const { + // Appropriate mapping of register ids allows to use AllocIdHigh(). + return FromRegId(AllocIdHigh()).AsCpuRegister(); + } + + constexpr RegisterPair AsRegisterPair() const { + return static_cast(id_ - + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds)); + } + + constexpr bool IsCpuRegister() const { + return (0 <= id_) && (id_ < kNumberOfCpuRegIds); + } + + constexpr bool IsXmmRegister() const { + const int test = id_ - kNumberOfCpuRegIds; + return (0 <= test) && (test < kNumberOfXmmRegIds); + } + + constexpr bool IsX87Register() const { + const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds); + return (0 <= test) && (test < kNumberOfX87RegIds); + } + + constexpr bool IsRegisterPair() const { + const int test = id_ - + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds); + return (0 <= test) && (test < kNumberOfPairRegIds); + } + + // Returns true if the two managed-registers ('this' and 'other') overlap. + // Either managed-register may be the NoRegister. If both are the NoRegister + // then false is returned. 
+ bool Overlaps(const X86ManagedRegister &other) const; + + static constexpr X86ManagedRegister FromCpuRegister(Register r) { + return FromRegId(r); + } + + static constexpr X86ManagedRegister FromXmmRegister(XmmRegister r) { + return FromRegId(r + kNumberOfCpuRegIds); + } + + static constexpr X86ManagedRegister FromX87Register(X87Register r) { + return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds); + } + + static constexpr X86ManagedRegister FromRegisterPair(RegisterPair r) { + return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds)); + } + + private: + constexpr bool IsValidManagedRegister() const { + return (0 <= id_) && (id_ < kNumberOfRegIds); + } + + constexpr int RegId() const { + CHECK(!IsNoRegister()); + return id_; + } + + int AllocId() const { + return id_; + } + + int AllocIdLow() const; + + int AllocIdHigh() const; + + friend class ManagedRegister; + + explicit constexpr X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {} + + static constexpr X86ManagedRegister FromRegId(int reg_id) { + X86ManagedRegister reg(reg_id); + return reg; + } +}; + +} // namespace x86 +} // namespace whale + +#endif // WHALE_ASSEMBLER_MANAGED_REGISTER_X86_H_ diff --git a/module/src/main/cpp/whale/src/assembler/x86/registers_x86.h b/module/src/main/cpp/whale/src/assembler/x86/registers_x86.h new file mode 100644 index 00000000..8887979f --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86/registers_x86.h @@ -0,0 +1,39 @@ +#ifndef WHALE_ASSEMBLER_X86_REGISTERS_X86_H_ +#define WHALE_ASSEMBLER_X86_REGISTERS_X86_H_ + +#include + +namespace whale { +namespace x86 { + +enum Register { + EAX = 0, + ECX = 1, + EDX = 2, + EBX = 3, + ESP = 4, + EBP = 5, + ESI = 6, + EDI = 7, + kNumberOfCpuRegisters = 8, + kFirstByteUnsafeRegister = 4, + kNoRegister = -1 // Signals an illegal register. 
+}; + +enum XmmRegister { + XMM0 = 0, + XMM1 = 1, + XMM2 = 2, + XMM3 = 3, + XMM4 = 4, + XMM5 = 5, + XMM6 = 6, + XMM7 = 7, + kNumberOfXmmRegisters = 8, + kNoXmmRegister = -1 // Signals an illegal register. +}; + +} // namespace x86 +} // namespace whale + +#endif // WHALE_ASSEMBLER_X86_REGISTERS_X86_H_ diff --git a/module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.cc b/module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.cc new file mode 100644 index 00000000..c800ce3b --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.cc @@ -0,0 +1,3881 @@ +#include "assembler/x86_64/assembler_x86_64.h" + +namespace whale { +namespace x86_64 { + + +uint8_t X86_64Assembler::EmitVexByteZero(bool is_two_byte) { + uint8_t vex_zero = 0xC0; + if (!is_two_byte) { + vex_zero |= 0xC4; + } else { + vex_zero |= 0xC5; + } + return vex_zero; +} + +uint8_t X86_64Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm) { + // VEX Byte 1 + uint8_t vex_prefix = 0; + if (!r) { + vex_prefix |= 0x80; // VEX.R + } + if (!x) { + vex_prefix |= 0x40; // VEX.X + } + if (!b) { + vex_prefix |= 0x20; // VEX.B + } + + // VEX.mmmmm + switch (mmmmm) { + case 1: + // implied 0F leading opcode byte + vex_prefix |= 0x01; + break; + case 2: + // implied leading 0F 38 opcode byte + vex_prefix |= 0x02; + break; + case 3: + // implied leading OF 3A opcode byte + vex_prefix |= 0x03; + break; + default: + LOG(FATAL) << "unknown opcode bytes"; + } + + return vex_prefix; +} + +uint8_t X86_64Assembler::EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp) { + // VEX Byte 2 + uint8_t vex_prefix = 0; + if (w) { + vex_prefix |= 0x80; + } + // VEX.vvvv + if (operand.IsXmmRegister()) { + XmmRegister vvvv = operand.AsXmmRegister(); + int inverted_reg = 15 - static_cast(vvvv.AsFloatRegister()); + uint8_t reg = static_cast(inverted_reg); + vex_prefix |= ((reg & 0x0F) << 3); + } else if (operand.IsCpuRegister()) { + CpuRegister vvvv = operand.AsCpuRegister(); + 
int inverted_reg = 15 - static_cast(vvvv.AsRegister()); + uint8_t reg = static_cast(inverted_reg); + vex_prefix |= ((reg & 0x0F) << 3); + } + + // VEX.L + if (l == 256) { + vex_prefix |= 0x04; + } + + // VEX.pp + switch (pp) { + case 0: + // SIMD Pefix - None + vex_prefix |= 0x00; + break; + case 1: + // SIMD Prefix - 66 + vex_prefix |= 0x01; + break; + case 2: + // SIMD Prefix - F3 + vex_prefix |= 0x02; + break; + case 3: + // SIMD Prefix - F2 + vex_prefix |= 0x03; + break; + default: + LOG(FATAL) << "unknown SIMD Prefix"; + } + + return vex_prefix; +} + +void X86_64Assembler::call(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0xFF); + EmitRegisterOperand(2, reg.LowBits()); +} + + +void X86_64Assembler::call(const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(address); + EmitUint8(0xFF); + EmitOperand(2, address); +} + + +void X86_64Assembler::call(Label *label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xE8); + static const int kSize = 5; + // Offset by one because we already have emitted the opcode. + EmitLabel(label, kSize - 1); +} + +void X86_64Assembler::pushq(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0x50 + reg.LowBits()); +} + + +void X86_64Assembler::pushq(const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(address); + EmitUint8(0xFF); + EmitOperand(6, address); +} + + +void X86_64Assembler::pushq(const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); // pushq only supports 32b immediate. 
+ if (imm.is_int8()) { + EmitUint8(0x6A); + EmitUint8(imm.value() & 0xFF); + } else { + EmitUint8(0x68); + EmitImmediate(imm); + } +} + + +void X86_64Assembler::popq(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0x58 + reg.LowBits()); +} + + +void X86_64Assembler::popq(const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(address); + EmitUint8(0x8F); + EmitOperand(0, address); +} + + +void X86_64Assembler::movq(CpuRegister dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (imm.is_int32()) { + // 32 bit. Note: sign-extends. + EmitRex64(dst); + EmitUint8(0xC7); + EmitRegisterOperand(0, dst.LowBits()); + EmitInt32(static_cast(imm.value())); + } else { + EmitRex64(dst); + EmitUint8(0xB8 + dst.LowBits()); + EmitInt64(imm.value()); + } +} + + +void X86_64Assembler::movl(CpuRegister dst, const Immediate &imm) { + CHECK(imm.is_int32()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitUint8(0xB8 + dst.LowBits()); + EmitImmediate(imm); +} + + +void X86_64Assembler::movq(const Address &dst, const Immediate &imm) { + CHECK(imm.is_int32()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst); + EmitUint8(0xC7); + EmitOperand(0, dst); + EmitImmediate(imm); +} + + +void X86_64Assembler::movq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // 0x89 is movq r/m64 <- r64, with op1 in r/m and op2 in reg: so reverse EmitRex64 + EmitRex64(src, dst); + EmitUint8(0x89); + EmitRegisterOperand(src.LowBits(), dst.LowBits()); +} + + +void X86_64Assembler::movl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x8B); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::movq(CpuRegister dst, const Address &src) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x8B); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movl(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x8B); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movq(const Address &dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(src, dst); + EmitUint8(0x89); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movl(const Address &dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(src, dst); + EmitUint8(0x89); + EmitOperand(src.LowBits(), dst); +} + +void X86_64Assembler::movl(const Address &dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitUint8(0xC7); + EmitOperand(0, dst); + EmitImmediate(imm); +} + +void X86_64Assembler::movntl(const Address &dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0xC3); + EmitOperand(src.LowBits(), dst); +} + +void X86_64Assembler::movntq(const Address &dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(src, dst); + EmitUint8(0x0F); + EmitUint8(0xC3); + EmitOperand(src.LowBits(), dst); +} + +void X86_64Assembler::cmov(Condition c, CpuRegister dst, CpuRegister src) { + cmov(c, dst, src, true); +} + +void X86_64Assembler::cmov(Condition c, CpuRegister dst, CpuRegister src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex(false, is64bit, dst.NeedsRex(), false, src.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x40 + c); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::cmov(Condition c, CpuRegister dst, const Address &src, bool is64bit) { + AssemblerBuffer::EnsureCapacity 
ensured(&buffer_); + if (is64bit) { + EmitRex64(dst, src); + } else { + EmitOptionalRex32(dst, src); + } + EmitUint8(0x0F); + EmitUint8(0x40 + c); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movzxb(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalByteRegNormalizingRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB6); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::movzxb(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // Byte register is only in the source register form, so we don't use + // EmitOptionalByteRegNormalizingRex32(dst, src); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB6); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movsxb(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalByteRegNormalizingRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBE); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::movsxb(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // Byte register is only in the source register form, so we don't use + // EmitOptionalByteRegNormalizingRex32(dst, src); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBE); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movb(CpuRegister /*dst*/, const Address & /*src*/) { + LOG(FATAL) << "Use movzxb or movsxb instead."; +} + + +void X86_64Assembler::movb(const Address &dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalByteRegNormalizingRex32(src, dst); + EmitUint8(0x88); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movb(const Address &dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitUint8(0xC6); + EmitOperand(Register::RAX, 
dst); + CHECK(imm.is_int8()); + EmitUint8(imm.value() & 0xFF); +} + + +void X86_64Assembler::movzxw(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB7); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::movzxw(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB7); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movsxw(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBF); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::movsxw(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBF); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movw(CpuRegister /*dst*/, const Address & /*src*/) { + LOG(FATAL) << "Use movzxw or movsxw instead."; +} + + +void X86_64Assembler::movw(const Address &dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOperandSizeOverride(); + EmitOptionalRex32(src, dst); + EmitUint8(0x89); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movw(const Address &dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOperandSizeOverride(); + EmitOptionalRex32(dst); + EmitUint8(0xC7); + EmitOperand(Register::RAX, dst); + CHECK(imm.is_uint16() || imm.is_int16()); + EmitUint8(imm.value() & 0xFF); + EmitUint8(imm.value() >> 8); +} + + +void X86_64Assembler::leaq(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x8D); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::leal(CpuRegister 
dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x8D); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movaps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movaps(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movups(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movaps(const Address &dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x29); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movups(const Address &dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movss(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movss(const Address &dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + 
EmitOptionalRex32(src, dst); // Movss is MR encoding instead of the usual RM. + EmitUint8(0x0F); + EmitUint8(0x11); + EmitXmmRegisterOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x63); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::movsxd(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x63); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movd(XmmRegister dst, CpuRegister src) { + movd(dst, src, true); +} + +void X86_64Assembler::movd(CpuRegister dst, XmmRegister src) { + movd(dst, src, true); +} + +void X86_64Assembler::movd(XmmRegister dst, CpuRegister src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, is64bit, dst.NeedsRex(), false, src.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x6E); + EmitOperand(dst.LowBits(), Operand(src)); +} + +void X86_64Assembler::movd(CpuRegister dst, XmmRegister src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, is64bit, src.NeedsRex(), false, dst.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x7E); + EmitOperand(src.LowBits(), Operand(dst)); +} + + +void X86_64Assembler::addss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::addss(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::subss(XmmRegister dst, XmmRegister src) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::subss(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::mulss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::mulss(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::divss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::divss(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::addps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::subps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::mulps(XmmRegister dst, 
XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::divps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::flds(const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(0, src); +} + + +void X86_64Assembler::fsts(const Address &dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(2, dst); +} + + +void X86_64Assembler::fstps(const Address &dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(3, dst); +} + + +void X86_64Assembler::movapd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movapd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x28); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movupd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movapd(const Address &dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x29); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movupd(const Address &dst, XmmRegister src) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movsd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x10); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movsd(const Address &dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x11); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(src, dst); // Movsd is MR encoding instead of the usual RM. + EmitUint8(0x0F); + EmitUint8(0x11); + EmitXmmRegisterOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::addsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::addsd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::subsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::subsd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitOperand(dst.LowBits(), 
src); +} + + +void X86_64Assembler::mulsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::mulsd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::divsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::divsd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5E); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::addpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x58); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::subpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::mulpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x59); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::divpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + 
EmitUint8(0x5E); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movdqa(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x6F); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movdqa(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x6F); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movdqu(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x6F); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::movdqa(const Address &dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x7F); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::movdqu(const Address &dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0x7F); + EmitOperand(src.LowBits(), dst); +} + + +void X86_64Assembler::paddb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xFC); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xF8); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::paddw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + 
EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xFD); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xF9); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::pmullw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xD5); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::paddd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xFE); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xFA); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::pmulld(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x40); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::paddq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xD4); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xFB); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::paddusb(XmmRegister 
dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xDC); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::paddsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xEC); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::paddusw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xDD); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::paddsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xED); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubusb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xD8); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xE8); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubusw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xD9); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psubsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xE9); + 
EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src) { + cvtsi2ss(dst, src, false); +} + + +void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + if (is64bit) { + // Emit a REX.W prefix if the operand size is 64 bits. + EmitRex64(dst, src); + } else { + EmitOptionalRex32(dst, src); + } + EmitUint8(0x0F); + EmitUint8(0x2A); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::cvtsi2ss(XmmRegister dst, const Address &src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + if (is64bit) { + // Emit a REX.W prefix if the operand size is 64 bits. + EmitRex64(dst, src); + } else { + EmitOptionalRex32(dst, src); + } + EmitUint8(0x0F); + EmitUint8(0x2A); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src) { + cvtsi2sd(dst, src, false); +} + + +void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + if (is64bit) { + // Emit a REX.W prefix if the operand size is 64 bits. + EmitRex64(dst, src); + } else { + EmitOptionalRex32(dst, src); + } + EmitUint8(0x0F); + EmitUint8(0x2A); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::cvtsi2sd(XmmRegister dst, const Address &src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + if (is64bit) { + // Emit a REX.W prefix if the operand size is 64 bits. 
+ EmitRex64(dst, src); + } else { + EmitOptionalRex32(dst, src); + } + EmitUint8(0x0F); + EmitUint8(0x2A); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtss2si(CpuRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x2D); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5A); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtss2sd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5A); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtsd2si(CpuRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x2D); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvttss2si(CpuRegister dst, XmmRegister src) { + cvttss2si(dst, src, false); +} + + +void X86_64Assembler::cvttss2si(CpuRegister dst, XmmRegister src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + if (is64bit) { + // Emit a REX.W prefix if the operand size is 64 bits. 
+ EmitRex64(dst, src); + } else { + EmitOptionalRex32(dst, src); + } + EmitUint8(0x0F); + EmitUint8(0x2C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvttsd2si(CpuRegister dst, XmmRegister src) { + cvttsd2si(dst, src, false); +} + + +void X86_64Assembler::cvttsd2si(CpuRegister dst, XmmRegister src, bool is64bit) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + if (is64bit) { + // Emit a REX.W prefix if the operand size is 64 bits. + EmitRex64(dst, src); + } else { + EmitOptionalRex32(dst, src); + } + EmitUint8(0x0F); + EmitUint8(0x2C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5A); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtsd2ss(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5A); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtdq2ps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5B); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xE6); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::comiss(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2F); + EmitXmmRegisterOperand(a.LowBits(), b); +} + + +void X86_64Assembler::comiss(XmmRegister a, const Address &b) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2F); + EmitOperand(a.LowBits(), b); +} + + +void X86_64Assembler::comisd(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2F); + EmitXmmRegisterOperand(a.LowBits(), b); +} + + +void X86_64Assembler::comisd(XmmRegister a, const Address &b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2F); + EmitOperand(a.LowBits(), b); +} + + +void X86_64Assembler::ucomiss(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitXmmRegisterOperand(a.LowBits(), b); +} + + +void X86_64Assembler::ucomiss(XmmRegister a, const Address &b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitOperand(a.LowBits(), b); +} + + +void X86_64Assembler::ucomisd(XmmRegister a, XmmRegister b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitXmmRegisterOperand(a.LowBits(), b); +} + + +void X86_64Assembler::ucomisd(XmmRegister a, const Address &b) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(a, b); + EmitUint8(0x0F); + EmitUint8(0x2E); + EmitOperand(a.LowBits(), b); +} + + +void X86_64Assembler::roundsd(XmmRegister dst, XmmRegister src, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x3A); + EmitUint8(0x0B); + EmitXmmRegisterOperand(dst.LowBits(), src); + EmitUint8(imm.value()); +} + + +void X86_64Assembler::roundss(XmmRegister dst, XmmRegister src, const Immediate &imm) 
{ + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x3A); + EmitUint8(0x0A); + EmitXmmRegisterOperand(dst.LowBits(), src); + EmitUint8(imm.value()); +} + + +void X86_64Assembler::sqrtsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x51); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::sqrtss(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x51); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::xorpd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::xorpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::xorps(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::xorps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x57); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::pxor(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xEF); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + 
+ +void X86_64Assembler::andpd(XmmRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x54); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::andpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x54); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::andps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x54); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pand(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xDB); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::andn(CpuRegister dst, CpuRegister src1, CpuRegister src2) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(dst.NeedsRex(), + /*x=*/ false, + src2.NeedsRex(), + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ true, + /*l=*/ 128, + X86_64ManagedRegister::FromCpuRegister( + src1.AsRegister()), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + // Opcode field + EmitUint8(0xF2); + EmitRegisterOperand(dst.LowBits(), src2.LowBits()); +} + +void X86_64Assembler::andnpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x55); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::andnps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + 
EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x55); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pandn(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xDF); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::orpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x56); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::orps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x56); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::por(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xEB); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pavgb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xE0); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pavgw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xE3); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::psadbw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xF6); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pmaddwd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity 
ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xF5); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::phaddw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x01); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::phaddd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x02); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::haddps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x7C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::haddpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x7C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::phsubw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x05); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::phsubd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x06); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::hsubps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x7D); + 
EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::hsubpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x7D); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pminsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x38); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pmaxsb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pminsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xEA); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pmaxsw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xEE); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pminsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x39); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pmaxsd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3D); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pminub(XmmRegister dst, XmmRegister src) { + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xDA); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pmaxub(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xDE); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pminuw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3A); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pmaxuw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3E); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pminud(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3B); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pmaxud(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x3F); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::minps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5D); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::maxps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5F); + 
EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::minpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5D); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::maxpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x5F); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x74); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpeqw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x75); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpeqd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x76); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpeqq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x29); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpgtb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x64); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpgtw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + 
EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x65); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpgtd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x66); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpgtq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x37); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst.LowBits(), src); + EmitUint8(imm.value()); +} + + +void X86_64Assembler::shufps(XmmRegister dst, XmmRegister src, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xC6); + EmitXmmRegisterOperand(dst.LowBits(), src); + EmitUint8(imm.value()); +} + + +void X86_64Assembler::pshufd(XmmRegister dst, XmmRegister src, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x70); + EmitXmmRegisterOperand(dst.LowBits(), src); + EmitUint8(imm.value()); +} + + +void X86_64Assembler::punpcklbw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x60); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::punpcklwd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + 
EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x61); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::punpckldq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x62); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::punpcklqdq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x6C); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::punpckhbw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x68); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::punpckhwd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x69); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::punpckhdq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x6A); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::punpckhqdq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x6D); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::psllw(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x71); + 
EmitXmmRegisterOperand(6, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::pslld(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x72); + EmitXmmRegisterOperand(6, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::psllq(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x73); + EmitXmmRegisterOperand(6, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::psraw(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x71); + EmitXmmRegisterOperand(4, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::psrad(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x72); + EmitXmmRegisterOperand(4, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::psrlw(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x71); + EmitXmmRegisterOperand(2, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::psrld(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + 
AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x72); + EmitXmmRegisterOperand(2, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::psrlq(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x73); + EmitXmmRegisterOperand(2, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::psrldq(XmmRegister reg, const Immediate &shift_count) { + DCHECK(shift_count.is_uint8()); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex(false, false, false, false, reg.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0x73); + EmitXmmRegisterOperand(3, reg); + EmitUint8(shift_count.value()); +} + + +void X86_64Assembler::fldl(const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitOperand(0, src); +} + + +void X86_64Assembler::fstl(const Address &dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitOperand(2, dst); +} + + +void X86_64Assembler::fstpl(const Address &dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitOperand(3, dst); +} + + +void X86_64Assembler::fstsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x9B); + EmitUint8(0xDF); + EmitUint8(0xE0); +} + + +void X86_64Assembler::fnstcw(const Address &dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(7, dst); +} + + +void X86_64Assembler::fldcw(const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitOperand(5, src); +} + + +void X86_64Assembler::fistpl(const Address &dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + 
EmitUint8(0xDF); + EmitOperand(7, dst); +} + + +void X86_64Assembler::fistps(const Address &dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDB); + EmitOperand(3, dst); +} + + +void X86_64Assembler::fildl(const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDF); + EmitOperand(5, src); +} + + +void X86_64Assembler::filds(const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDB); + EmitOperand(0, src); +} + + +void X86_64Assembler::fincstp() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xF7); +} + + +void X86_64Assembler::ffree(const Immediate &index) { + CHECK_LT(index.value(), 7); + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDD); + EmitUint8(0xC0 + index.value()); +} + + +void X86_64Assembler::fsin() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xFE); +} + + +void X86_64Assembler::fcos() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xFF); +} + + +void X86_64Assembler::fptan() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xF2); +} + +void X86_64Assembler::fucompp() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xDA); + EmitUint8(0xE9); +} + + +void X86_64Assembler::fprem() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xD9); + EmitUint8(0xF8); +} + + +void X86_64Assembler::xchgl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // There is a short version for rax. + // It's a bit awkward, as CpuRegister has a const field, so assignment and thus swapping doesn't + // work. + const bool src_rax = src.AsRegister() == RAX; + const bool dst_rax = dst.AsRegister() == RAX; + if (src_rax || dst_rax) { + EmitOptionalRex32(src_rax ? dst : src); + EmitUint8(0x90 + (src_rax ? 
dst.LowBits() : src.LowBits())); + return; + } + + // General case. + EmitOptionalRex32(src, dst); + EmitUint8(0x87); + EmitRegisterOperand(src.LowBits(), dst.LowBits()); +} + + +void X86_64Assembler::xchgq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // There is a short version for rax. + // It's a bit awkward, as CpuRegister has a const field, so assignment and thus swapping doesn't + // work. + const bool src_rax = src.AsRegister() == RAX; + const bool dst_rax = dst.AsRegister() == RAX; + if (src_rax || dst_rax) { + // If src == target, emit a nop instead. + if (src_rax && dst_rax) { + EmitUint8(0x90); + } else { + EmitRex64(src_rax ? dst : src); + EmitUint8(0x90 + (src_rax ? dst.LowBits() : src.LowBits())); + } + return; + } + + // General case. + EmitRex64(src, dst); + EmitUint8(0x87); + EmitRegisterOperand(src.LowBits(), dst.LowBits()); +} + + +void X86_64Assembler::xchgl(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x87); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::cmpb(const Address &address, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); + EmitOptionalRex32(address); + EmitUint8(0x80); + EmitOperand(7, address); + EmitUint8(imm.value() & 0xFF); +} + + +void X86_64Assembler::cmpw(const Address &address, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); + EmitOperandSizeOverride(); + EmitOptionalRex32(address); + EmitComplex(7, address, imm, /* is_16_op= */ true); +} + + +void X86_64Assembler::cmpl(CpuRegister reg, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); + EmitOptionalRex32(reg); + EmitComplex(7, Operand(reg), imm); +} + + +void X86_64Assembler::cmpl(CpuRegister reg0, CpuRegister reg1) { + AssemblerBuffer::EnsureCapacity 
ensured(&buffer_); + EmitOptionalRex32(reg0, reg1); + EmitUint8(0x3B); + EmitOperand(reg0.LowBits(), Operand(reg1)); +} + + +void X86_64Assembler::cmpl(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x3B); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::cmpl(const Address &address, CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x39); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::cmpl(const Address &address, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); + EmitOptionalRex32(address); + EmitComplex(7, address, imm); +} + + +void X86_64Assembler::cmpq(CpuRegister reg0, CpuRegister reg1) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg0, reg1); + EmitUint8(0x3B); + EmitOperand(reg0.LowBits(), Operand(reg1)); +} + + +void X86_64Assembler::cmpq(CpuRegister reg, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); // cmpq only supports 32b immediate. + EmitRex64(reg); + EmitComplex(7, Operand(reg), imm); +} + + +void X86_64Assembler::cmpq(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg, address); + EmitUint8(0x3B); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::cmpq(const Address &address, const Immediate &imm) { + CHECK(imm.is_int32()); // cmpq only supports 32b immediate. 
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(address); + EmitComplex(7, address, imm); +} + + +void X86_64Assembler::addl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x03); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + + +void X86_64Assembler::addl(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x03); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::testl(CpuRegister reg1, CpuRegister reg2) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg1, reg2); + EmitUint8(0x85); + EmitRegisterOperand(reg1.LowBits(), reg2.LowBits()); +} + + +void X86_64Assembler::testl(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x85); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::testl(CpuRegister reg, const Immediate &immediate) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // For registers that have a byte variant (RAX, RBX, RCX, and RDX) + // we only test the byte CpuRegister to keep the encoding short. + if (immediate.is_uint8() && reg.AsRegister() < 4) { + // Use zero-extended 8-bit immediate. + if (reg.AsRegister() == RAX) { + EmitUint8(0xA8); + } else { + EmitUint8(0xF6); + EmitUint8(0xC0 + reg.AsRegister()); + } + EmitUint8(immediate.value() & 0xFF); + } else if (reg.AsRegister() == RAX) { + // Use short form if the destination is RAX. 
+ EmitUint8(0xA9); + EmitImmediate(immediate); + } else { + EmitOptionalRex32(reg); + EmitUint8(0xF7); + EmitOperand(0, Operand(reg)); + EmitImmediate(immediate); + } +} + + +void X86_64Assembler::testq(CpuRegister reg1, CpuRegister reg2) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg1, reg2); + EmitUint8(0x85); + EmitRegisterOperand(reg1.LowBits(), reg2.LowBits()); +} + + +void X86_64Assembler::testq(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg, address); + EmitUint8(0x85); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::testb(const Address &dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitUint8(0xF6); + EmitOperand(Register::RAX, dst); + CHECK(imm.is_int8()); + EmitUint8(imm.value() & 0xFF); +} + + +void X86_64Assembler::testl(const Address &dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitUint8(0xF7); + EmitOperand(0, dst); + EmitImmediate(imm); +} + + +void X86_64Assembler::andl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x23); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::andl(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x23); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::andl(CpuRegister dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitComplex(4, Operand(dst), imm); +} + + +void X86_64Assembler::andq(CpuRegister reg, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); // andq only supports 32b immediate. 
+ EmitRex64(reg); + EmitComplex(4, Operand(reg), imm); +} + + +void X86_64Assembler::andq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x23); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::andq(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x23); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::orl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0B); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::orl(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x0B); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::orl(CpuRegister dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitComplex(1, Operand(dst), imm); +} + + +void X86_64Assembler::orq(CpuRegister dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); // orq only supports 32b immediate. 
+ EmitRex64(dst); + EmitComplex(1, Operand(dst), imm); +} + + +void X86_64Assembler::orq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0B); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::orq(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0B); + EmitOperand(dst.LowBits(), src); +} + + +void X86_64Assembler::xorl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x33); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::xorl(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x33); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::xorl(CpuRegister dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst); + EmitComplex(6, Operand(dst), imm); +} + + +void X86_64Assembler::xorq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x33); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::xorq(CpuRegister dst, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); // xorq only supports 32b immediate. + EmitRex64(dst); + EmitComplex(6, Operand(dst), imm); +} + +void X86_64Assembler::xorq(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x33); + EmitOperand(dst.LowBits(), src); +} + + +#if 0 +void X86_64Assembler::rex(bool force, bool w, Register* r, Register* x, Register* b) { + // REX.WRXB + // W - 64-bit operand + // R - MODRM.reg + // X - SIB.index + // B - MODRM.rm/SIB.base + uint8_t rex = force ? 
0x40 : 0;
+  if (w) {
+    rex |= 0x48; // REX.W000
+  }
+  if (r != nullptr && *r >= Register::R8 && *r < Register::kNumberOfCpuRegisters) {
+    rex |= 0x44; // REX.0R00
+    *r = static_cast<Register>(*r - 8);
+  }
+  if (x != nullptr && *x >= Register::R8 && *x < Register::kNumberOfCpuRegisters) {
+    rex |= 0x42; // REX.00X0
+    *x = static_cast<Register>(*x - 8);
+  }
+  if (b != nullptr && *b >= Register::R8 && *b < Register::kNumberOfCpuRegisters) {
+    rex |= 0x41; // REX.000B
+    *b = static_cast<Register>(*b - 8);
+  }
+  if (rex != 0) {
+    EmitUint8(rex);
+  }
+}
+
+void X86_64Assembler::rex_reg_mem(bool force, bool w, Register* dst, const Address& mem) {
+  // REX.WRXB
+  // W - 64-bit operand
+  // R - MODRM.reg
+  // X - SIB.index
+  // B - MODRM.rm/SIB.base
+  uint8_t rex = mem->rex();
+  if (force) {
+    rex |= 0x40; // REX.0000
+  }
+  if (w) {
+    rex |= 0x48; // REX.W000
+  }
+  if (dst != nullptr && *dst >= Register::R8 && *dst < Register::kNumberOfCpuRegisters) {
+    rex |= 0x44; // REX.0R00
+    *dst = static_cast<Register>(*dst - 8);
+  }
+  if (rex != 0) {
+    EmitUint8(rex);
+  }
+}
+
+void rex_mem_reg(bool force, bool w, Address* mem, Register* src);
+#endif
+
+void X86_64Assembler::addl(CpuRegister reg, const Immediate &imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitOptionalRex32(reg);
+  EmitComplex(0, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::addq(CpuRegister reg, const Immediate &imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  CHECK(imm.is_int32()); // addq only supports 32b immediate.
+ EmitRex64(reg); + EmitComplex(0, Operand(reg), imm); +} + + +void X86_64Assembler::addq(CpuRegister dst, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, address); + EmitUint8(0x03); + EmitOperand(dst.LowBits(), address); +} + + +void X86_64Assembler::addq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // 0x01 is addq r/m64 <- r/m64 + r64, with op1 in r/m and op2 in reg: so reverse EmitRex64 + EmitRex64(src, dst); + EmitUint8(0x01); + EmitRegisterOperand(src.LowBits(), dst.LowBits()); +} + + +void X86_64Assembler::addl(const Address &address, CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x01); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::addl(const Address &address, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(address); + EmitComplex(0, address, imm); +} + + +void X86_64Assembler::addw(const Address &address, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_uint16() || imm.is_int16()) << imm.value(); + EmitUint8(0x66); + EmitOptionalRex32(address); + EmitComplex(0, address, imm, /* is_16_op= */ true); +} + + +void X86_64Assembler::subl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x2B); + EmitOperand(dst.LowBits(), Operand(src)); +} + + +void X86_64Assembler::subl(CpuRegister reg, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitComplex(5, Operand(reg), imm); +} + + +void X86_64Assembler::subq(CpuRegister reg, const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); // subq only supports 32b immediate. 
+  EmitRex64(reg);
+  EmitComplex(5, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::subq(CpuRegister dst, CpuRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitRex64(dst, src);
+  EmitUint8(0x2B);
+  EmitRegisterOperand(dst.LowBits(), src.LowBits());
+}
+
+
+void X86_64Assembler::subq(CpuRegister reg, const Address &address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitRex64(reg, address);
+  EmitUint8(0x2B);
+  EmitOperand(reg.LowBits() & 7, address);
+}
+
+
+void X86_64Assembler::subl(CpuRegister reg, const Address &address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitOptionalRex32(reg, address);
+  EmitUint8(0x2B);
+  EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::cdq() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x99);
+}
+
+
+void X86_64Assembler::cqo() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitRex64();
+  EmitUint8(0x99);
+}
+
+
+void X86_64Assembler::idivl(CpuRegister reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitOptionalRex32(reg);
+  EmitUint8(0xF7);
+  EmitUint8(0xF8 | reg.LowBits());
+}
+
+
+void X86_64Assembler::idivq(CpuRegister reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitRex64(reg);
+  EmitUint8(0xF7);
+  EmitUint8(0xF8 | reg.LowBits());
+}
+
+
+void X86_64Assembler::imull(CpuRegister dst, CpuRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xAF);
+  EmitOperand(dst.LowBits(), Operand(src));
+}
+
+void X86_64Assembler::imull(CpuRegister dst, CpuRegister src, const Immediate &imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  CHECK(imm.is_int32()); // imull only supports 32b immediate.
+
+  EmitOptionalRex32(dst, src);
+
+  // See whether imm can be represented as a sign-extended 8bit value.
+  int32_t v32 = static_cast<int32_t>(imm.value());
+  if (IsInt<8>(v32)) {
+    // Sign-extension works.
+    EmitUint8(0x6B);
+    EmitOperand(dst.LowBits(), Operand(src));
+    EmitUint8(static_cast<uint8_t>(v32 & 0xFF));
+  } else {
+    // Not representable, use full immediate.
+    EmitUint8(0x69);
+    EmitOperand(dst.LowBits(), Operand(src));
+    EmitImmediate(imm);
+  }
+}
+
+
+void X86_64Assembler::imull(CpuRegister reg, const Immediate &imm) {
+  imull(reg, reg, imm);
+}
+
+
+void X86_64Assembler::imull(CpuRegister reg, const Address &address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitOptionalRex32(reg, address);
+  EmitUint8(0x0F);
+  EmitUint8(0xAF);
+  EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::imulq(CpuRegister dst, CpuRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitRex64(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xAF);
+  EmitRegisterOperand(dst.LowBits(), src.LowBits());
+}
+
+
+void X86_64Assembler::imulq(CpuRegister reg, const Immediate &imm) {
+  imulq(reg, reg, imm);
+}
+
+void X86_64Assembler::imulq(CpuRegister dst, CpuRegister reg, const Immediate &imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  CHECK(imm.is_int32()); // imulq only supports 32b immediate.
+
+  EmitRex64(dst, reg);
+
+  // See whether imm can be represented as a sign-extended 8bit value.
+  int64_t v64 = imm.value();
+  if (IsInt<8>(v64)) {
+    // Sign-extension works.
+    EmitUint8(0x6B);
+    EmitOperand(dst.LowBits(), Operand(reg));
+    EmitUint8(static_cast<uint8_t>(v64 & 0xFF));
+  } else {
+    // Not representable, use full immediate.
+ EmitUint8(0x69); + EmitOperand(dst.LowBits(), Operand(reg)); + EmitImmediate(imm); + } +} + +void X86_64Assembler::imulq(CpuRegister reg, const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg, address); + EmitUint8(0x0F); + EmitUint8(0xAF); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::imull(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0xF7); + EmitOperand(5, Operand(reg)); +} + + +void X86_64Assembler::imulq(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg); + EmitUint8(0xF7); + EmitOperand(5, Operand(reg)); +} + + +void X86_64Assembler::imull(const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(address); + EmitUint8(0xF7); + EmitOperand(5, address); +} + + +void X86_64Assembler::mull(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0xF7); + EmitOperand(4, Operand(reg)); +} + + +void X86_64Assembler::mull(const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(address); + EmitUint8(0xF7); + EmitOperand(4, address); +} + + +void X86_64Assembler::shll(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(false, 4, reg, imm); +} + + +void X86_64Assembler::shlq(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(true, 4, reg, imm); +} + + +void X86_64Assembler::shll(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(false, 4, operand, shifter); +} + + +void X86_64Assembler::shlq(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(true, 4, operand, shifter); +} + + +void X86_64Assembler::shrl(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(false, 5, reg, imm); +} + + +void X86_64Assembler::shrq(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(true, 5, reg, imm); +} + + +void 
X86_64Assembler::shrl(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(false, 5, operand, shifter); +} + + +void X86_64Assembler::shrq(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(true, 5, operand, shifter); +} + + +void X86_64Assembler::sarl(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(false, 7, reg, imm); +} + + +void X86_64Assembler::sarl(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(false, 7, operand, shifter); +} + + +void X86_64Assembler::sarq(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(true, 7, reg, imm); +} + + +void X86_64Assembler::sarq(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(true, 7, operand, shifter); +} + + +void X86_64Assembler::roll(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(false, 0, reg, imm); +} + + +void X86_64Assembler::roll(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(false, 0, operand, shifter); +} + + +void X86_64Assembler::rorl(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(false, 1, reg, imm); +} + + +void X86_64Assembler::rorl(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(false, 1, operand, shifter); +} + + +void X86_64Assembler::rolq(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(true, 0, reg, imm); +} + + +void X86_64Assembler::rolq(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(true, 0, operand, shifter); +} + + +void X86_64Assembler::rorq(CpuRegister reg, const Immediate &imm) { + EmitGenericShift(true, 1, reg, imm); +} + + +void X86_64Assembler::rorq(CpuRegister operand, CpuRegister shifter) { + EmitGenericShift(true, 1, operand, shifter); +} + + +void X86_64Assembler::negl(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0xF7); + EmitOperand(3, Operand(reg)); +} + + +void X86_64Assembler::negq(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + 
EmitRex64(reg); + EmitUint8(0xF7); + EmitOperand(3, Operand(reg)); +} + + +void X86_64Assembler::notl(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0xF7); + EmitUint8(0xD0 | reg.LowBits()); +} + + +void X86_64Assembler::notq(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg); + EmitUint8(0xF7); + EmitOperand(2, Operand(reg)); +} + + +void X86_64Assembler::enter(const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC8); + CHECK(imm.is_uint16()) << imm.value(); + EmitUint8(imm.value() & 0xFF); + EmitUint8((imm.value() >> 8) & 0xFF); + EmitUint8(0x00); +} + + +void X86_64Assembler::leave() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC9); +} + + +void X86_64Assembler::ret() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC3); +} + + +void X86_64Assembler::ret(const Immediate &imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xC2); + CHECK(imm.is_uint16()); + EmitUint8(imm.value() & 0xFF); + EmitUint8((imm.value() >> 8) & 0xFF); +} + + +void X86_64Assembler::nop() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x90); +} + + +void X86_64Assembler::int3() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xCC); +} + + +void X86_64Assembler::hlt() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF4); +} + + +void X86_64Assembler::j(Condition condition, Label *label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (label->IsBound()) { + static const int kShortSize = 2; + static const int kLongSize = 6; + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + if (IsInt<8>(offset - kShortSize)) { + EmitUint8(0x70 + condition); + EmitUint8((offset - kShortSize) & 0xFF); + } else { + EmitUint8(0x0F); + EmitUint8(0x80 + condition); + EmitInt32(offset - kLongSize); + } + } else { + 
EmitUint8(0x0F); + EmitUint8(0x80 + condition); + EmitLabelLink(label); + } +} + + +void X86_64Assembler::j(Condition condition, NearLabel *label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (label->IsBound()) { + static const int kShortSize = 2; + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + CHECK(IsInt<8>(offset - kShortSize)); + EmitUint8(0x70 + condition); + EmitUint8((offset - kShortSize) & 0xFF); + } else { + EmitUint8(0x70 + condition); + EmitLabelLink(label); + } +} + + +void X86_64Assembler::jrcxz(NearLabel *label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (label->IsBound()) { + static const int kShortSize = 2; + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + CHECK(IsInt<8>(offset - kShortSize)); + EmitUint8(0xE3); + EmitUint8((offset - kShortSize) & 0xFF); + } else { + EmitUint8(0xE3); + EmitLabelLink(label); + } +} + + +void X86_64Assembler::jmp(CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg); + EmitUint8(0xFF); + EmitRegisterOperand(4, reg.LowBits()); +} + +void X86_64Assembler::jmp(const Address &address) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(address); + EmitUint8(0xFF); + EmitOperand(4, address); +} + +void X86_64Assembler::jmp(Label *label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (label->IsBound()) { + static const int kShortSize = 2; + static const int kLongSize = 5; + int offset = label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + if (IsInt<8>(offset - kShortSize)) { + EmitUint8(0xEB); + EmitUint8((offset - kShortSize) & 0xFF); + } else { + EmitUint8(0xE9); + EmitInt32(offset - kLongSize); + } + } else { + EmitUint8(0xE9); + EmitLabelLink(label); + } +} + + +void X86_64Assembler::jmp(NearLabel *label) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + if (label->IsBound()) { + static const int kShortSize = 2; + int offset = 
label->Position() - buffer_.Size(); + CHECK_LE(offset, 0); + CHECK(IsInt<8>(offset - kShortSize)); + EmitUint8(0xEB); + EmitUint8((offset - kShortSize) & 0xFF); + } else { + EmitUint8(0xEB); + EmitLabelLink(label); + } +} + + +void X86_64Assembler::rep_movsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA5); +} + + +X86_64Assembler *X86_64Assembler::lock() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF0); + return this; +} + + +void X86_64Assembler::cmpxchgl(const Address &address, CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(reg, address); + EmitUint8(0x0F); + EmitUint8(0xB1); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::cmpxchgq(const Address &address, CpuRegister reg) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(reg, address); + EmitUint8(0x0F); + EmitUint8(0xB1); + EmitOperand(reg.LowBits(), address); +} + + +void X86_64Assembler::mfence() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xAE); + EmitUint8(0xF0); +} + + +X86_64Assembler *X86_64Assembler::gs() { + // TODO: gs is a prefix and not an instruction + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x65); + return this; +} + + +void X86_64Assembler::AddImmediate(CpuRegister reg, const Immediate &imm) { + int value = imm.value(); + if (value != 0) { + if (value > 0) { + addl(reg, imm); + } else { + subl(reg, Immediate(value)); + } + } +} + + +void X86_64Assembler::setcc(Condition condition, CpuRegister dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + // RSP, RBP, RDI, RSI need rex prefix (else the pattern encodes ah/bh/ch/dh). 
+ if (dst.NeedsRex() || dst.AsRegister() > 3) { + EmitOptionalRex(true, false, false, false, dst.NeedsRex()); + } + EmitUint8(0x0F); + EmitUint8(0x90 + condition); + EmitUint8(0xC0 + dst.LowBits()); +} + +void X86_64Assembler::blsi(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(/*r=*/ false, + /*x=*/ false, + src.NeedsRex(), + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ true, + /*l=*/ 128, + X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + EmitUint8(0xF3); + EmitRegisterOperand(3, src.LowBits()); +} + +void X86_64Assembler::blsmsk(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(/*r=*/ false, + /*x=*/ false, + src.NeedsRex(), + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ true, + /*l=*/ 128, + X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + EmitUint8(0xF3); + EmitRegisterOperand(2, src.LowBits()); +} + +void X86_64Assembler::blsr(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false); + uint8_t byte_one = EmitVexByte1(/*r=*/ false, + /*x=*/ false, + src.NeedsRex(), + /*mmmmm=*/ 2); + uint8_t byte_two = EmitVexByte2(/*w=*/ true, + /*l=*/ 128, + X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()), + /*pp=*/ 0); + EmitUint8(byte_zero); + EmitUint8(byte_one); + EmitUint8(byte_two); + EmitUint8(0xF3); + EmitRegisterOperand(1, src.LowBits()); +} + +void X86_64Assembler::bswapl(CpuRegister dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex(false, false, false, 
false, dst.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0xC8 + dst.LowBits()); +} + +void X86_64Assembler::bswapq(CpuRegister dst) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex(false, true, false, false, dst.NeedsRex()); + EmitUint8(0x0F); + EmitUint8(0xC8 + dst.LowBits()); +} + +void X86_64Assembler::bsfl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBC); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::bsfl(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBC); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::bsfq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBC); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::bsfq(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBC); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::bsrl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::bsrl(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::bsrq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::bsrq(CpuRegister dst, const 
Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::popcntl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB8); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::popcntl(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB8); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::popcntq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB8); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::popcntq(CpuRegister dst, const Address &src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xB8); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::repne_scasb() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF2); + EmitUint8(0xAE); +} + +void X86_64Assembler::repne_scasw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF2); + EmitUint8(0xAF); +} + +void X86_64Assembler::repe_cmpsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86_64Assembler::repe_cmpsl() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86_64Assembler::repe_cmpsq() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitRex64(); + EmitUint8(0xA7); +} + + +void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) { + // 
TODO: Need to have a code constants table.
+  int64_t constant = bit_cast<int64_t, double>(value);
+  pushq(Immediate(High32Bits(constant)));
+  pushq(Immediate(Low32Bits(constant)));
+  movsd(dst, Address(CpuRegister(RSP), 0));
+  addq(CpuRegister(RSP), Immediate(2 * sizeof(intptr_t)));
+}
+
+
+void X86_64Assembler::Align(int alignment, int offset) {
+  CHECK(IsPowerOfTwo(alignment));
+  // Emit nop instruction until the real position is aligned.
+  while (((offset + buffer_.GetPosition()) & (alignment - 1)) != 0) {
+    nop();
+  }
+}
+
+
+void X86_64Assembler::Bind(Label *label) {
+  int bound = buffer_.Size();
+  CHECK(!label->IsBound()); // Labels can only be bound once.
+  while (label->IsLinked()) {
+    int position = label->LinkPosition();
+    int next = buffer_.Load<int32_t>(position);
+    buffer_.Store<int32_t>(position, bound - (position + 4));
+    label->position_ = next;
+  }
+  label->BindTo(bound);
+}
+
+
+void X86_64Assembler::Bind(NearLabel *label) {
+  int bound = buffer_.Size();
+  CHECK(!label->IsBound()); // Labels can only be bound once.
+  while (label->IsLinked()) {
+    int position = label->LinkPosition();
+    uint8_t delta = buffer_.Load<uint8_t>(position);
+    int offset = bound - (position + 1);
+    CHECK(IsInt<8>(offset));
+    buffer_.Store<int8_t>(position, offset);
+    label->position_ = delta != 0u ? label->position_ - delta : 0;
+  }
+  label->BindTo(bound);
+}
+
+
+void X86_64Assembler::EmitOperand(uint8_t reg_or_opcode, const Operand &operand) {
+  CHECK_GE(reg_or_opcode, 0);
+  CHECK_LT(reg_or_opcode, 8);
+  const int length = operand.length_;
+  CHECK_GT(length, 0);
+  // Emit the ModRM byte updated with the given reg value.
+  CHECK_EQ(operand.encoding_[0] & 0x38, 0);
+  EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3));
+  // Emit the rest of the encoded operand.
+  for (int i = 1; i < length; i++) {
+    EmitUint8(operand.encoding_[i]);
+  }
+  AssemblerFixup *fixup = operand.GetFixup();
+  if (fixup != nullptr) {
+    EmitFixup(fixup);
+  }
+}
+
+
+void X86_64Assembler::EmitImmediate(const Immediate &imm, bool is_16_op) {
+  if (is_16_op) {
+    EmitUint8(imm.value() & 0xFF);
+    EmitUint8(imm.value() >> 8);
+  } else if (imm.is_int32()) {
+    EmitInt32(static_cast<int32_t>(imm.value()));
+  } else {
+    EmitInt64(imm.value());
+  }
+}
+
+
+void X86_64Assembler::EmitComplex(uint8_t reg_or_opcode,
+                                  const Operand &operand,
+                                  const Immediate &immediate,
+                                  bool is_16_op) {
+  CHECK_GE(reg_or_opcode, 0);
+  CHECK_LT(reg_or_opcode, 8);
+  if (immediate.is_int8()) {
+    // Use sign-extended 8-bit immediate.
+    EmitUint8(0x83);
+    EmitOperand(reg_or_opcode, operand);
+    EmitUint8(immediate.value() & 0xFF);
+  } else if (operand.IsRegister(CpuRegister(RAX))) {
+    // Use short form if the destination is eax.
+    EmitUint8(0x05 + (reg_or_opcode << 3));
+    EmitImmediate(immediate, is_16_op);
+  } else {
+    EmitUint8(0x81);
+    EmitOperand(reg_or_opcode, operand);
+    EmitImmediate(immediate, is_16_op);
+  }
+}
+
+
+void X86_64Assembler::EmitLabel(Label *label, int instruction_size) {
+  if (label->IsBound()) {
+    int offset = label->Position() - buffer_.Size();
+    CHECK_LE(offset, 0);
+    EmitInt32(offset - instruction_size);
+  } else {
+    EmitLabelLink(label);
+  }
+}
+
+
+void X86_64Assembler::EmitLabelLink(Label *label) {
+  CHECK(!label->IsBound());
+  int position = buffer_.Size();
+  EmitInt32(label->position_);
+  label->LinkTo(position);
+}
+
+
+void X86_64Assembler::EmitLabelLink(NearLabel *label) {
+  CHECK(!label->IsBound());
+  int position = buffer_.Size();
+  if (label->IsLinked()) {
+    // Save the delta in the byte that we have to play with.
+ // NOTE(review): this is the tail of a label-link emitter whose head lies before this
+ // chunk; left byte-identical. It appears to emit an 8-bit link delta (or 0 for the
+ // first link) and thread the label onto the link chain — confirm against the full file.
+ uint32_t delta = position - label->LinkPosition();
+ CHECK(IsUint<8>(delta));
+ EmitUint8(delta & 0xFF);
+ } else {
+ EmitUint8(0);
+ }
+ label->LinkTo(position);
+}
+
+
+// Shift-by-immediate encoder shared by shll/shrl/sarl/... wrappers.
+// Opcode 0xD1 encodes shift-by-1; otherwise 0xC1 is followed by the imm8 count.
+// `wide` selects the REX.W (64-bit operand) form; `reg_or_opcode` is the /r opcode
+// extension placed in ModRM.reg.
+void X86_64Assembler::EmitGenericShift(bool wide,
+ int reg_or_opcode,
+ CpuRegister reg,
+ const Immediate &imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int8());
+ if (wide) {
+ EmitRex64(reg);
+ } else {
+ EmitOptionalRex32(reg);
+ }
+ if (imm.value() == 1) {
+ EmitUint8(0xD1);
+ EmitOperand(reg_or_opcode, Operand(reg));
+ } else {
+ EmitUint8(0xC1);
+ EmitOperand(reg_or_opcode, Operand(reg));
+ EmitUint8(imm.value() & 0xFF);
+ }
+}
+
+
+// Shift-by-CL encoder (opcode 0xD3). The hardware only takes the count in CL,
+// so the shifter register is asserted to be RCX.
+void X86_64Assembler::EmitGenericShift(bool wide,
+ int reg_or_opcode,
+ CpuRegister operand,
+ CpuRegister shifter) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK_EQ(shifter.AsRegister(), RCX);
+ if (wide) {
+ EmitRex64(operand);
+ } else {
+ EmitOptionalRex32(operand);
+ }
+ EmitUint8(0xD3);
+ EmitOperand(reg_or_opcode, Operand(operand));
+}
+
+// Builds a REX prefix from the four flag bits and emits it only if non-zero.
+// `force` seeds the bare 0x40 prefix so a REX byte is emitted even when no
+// W/R/X/B bit is set (needed to address SPL/BPL/SIL/DIL as byte registers).
+void X86_64Assembler::EmitOptionalRex(bool force, bool w, bool r, bool x, bool b) {
+ // REX.WRXB
+ // W - 64-bit operand
+ // R - MODRM.reg
+ // X - SIB.index
+ // B - MODRM.rm/SIB.base
+ uint8_t rex = force ? 0x40 : 0;
+ if (w) {
+ rex |= 0x48; // REX.W000
+ }
+ if (r) {
+ rex |= 0x44; // REX.0R00
+ }
+ if (x) {
+ rex |= 0x42; // REX.00X0
+ }
+ if (b) {
+ rex |= 0x41; // REX.000B
+ }
+ if (rex != 0) {
+ EmitUint8(rex);
+ }
+}
+
+// 32-bit-operand forms below: a REX prefix is emitted only when an extended
+// register (one whose NeedsRex() is true) or a REX-carrying Operand is involved.
+void X86_64Assembler::EmitOptionalRex32(CpuRegister reg) {
+ EmitOptionalRex(false, false, false, false, reg.NeedsRex());
+}
+
+void X86_64Assembler::EmitOptionalRex32(CpuRegister dst, CpuRegister src) {
+ EmitOptionalRex(false, false, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitOptionalRex32(XmmRegister dst, XmmRegister src) {
+ EmitOptionalRex(false, false, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitOptionalRex32(CpuRegister dst, XmmRegister src) {
+ EmitOptionalRex(false, false, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitOptionalRex32(XmmRegister dst, CpuRegister src) {
+ EmitOptionalRex(false, false, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitOptionalRex32(const Operand &operand) {
+ uint8_t rex = operand.rex();
+ if (rex != 0) {
+ EmitUint8(rex);
+ }
+}
+
+void X86_64Assembler::EmitOptionalRex32(CpuRegister dst, const Operand &operand) {
+ uint8_t rex = operand.rex();
+ if (dst.NeedsRex()) {
+ rex |= 0x44; // REX.0R00
+ }
+ if (rex != 0) {
+ EmitUint8(rex);
+ }
+}
+
+void X86_64Assembler::EmitOptionalRex32(XmmRegister dst, const Operand &operand) {
+ uint8_t rex = operand.rex();
+ if (dst.NeedsRex()) {
+ rex |= 0x44; // REX.0R00
+ }
+ if (rex != 0) {
+ EmitUint8(rex);
+ }
+}
+
+// 64-bit-operand forms below: REX is mandatory here because W (0x48) is always set.
+void X86_64Assembler::EmitRex64() {
+ EmitOptionalRex(false, true, false, false, false);
+}
+
+void X86_64Assembler::EmitRex64(CpuRegister reg) {
+ EmitOptionalRex(false, true, false, false, reg.NeedsRex());
+}
+
+void X86_64Assembler::EmitRex64(const Operand &operand) {
+ uint8_t rex = operand.rex();
+ rex |= 0x48; // REX.W000
+ EmitUint8(rex);
+}
+
+void X86_64Assembler::EmitRex64(CpuRegister dst, CpuRegister src) {
+ EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitRex64(XmmRegister dst, CpuRegister src) {
+ EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitRex64(CpuRegister dst, XmmRegister src) {
+ EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitRex64(CpuRegister dst, const Operand &operand) {
+ uint8_t rex = 0x48 | operand.rex(); // REX.W000
+ if (dst.NeedsRex()) {
+ rex |= 0x44; // REX.0R00
+ }
+ EmitUint8(rex);
+}
+
+void X86_64Assembler::EmitRex64(XmmRegister dst, const Operand &operand) {
+ uint8_t rex = 0x48 | operand.rex(); // REX.W000
+ if (dst.NeedsRex()) {
+ rex |= 0x44; // REX.0R00
+ }
+ EmitUint8(rex);
+}
+
+// Byte-register forms: registers 4-7 (SPL/BPL/SIL/DIL) need a bare REX prefix
+// to be addressed as byte registers (without it they encode AH/CH/DH/BH).
+void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src) {
+ // For src, SPL, BPL, SIL, DIL need the rex prefix.
+ bool force = src.AsRegister() > 3;
+ EmitOptionalRex(force, false, dst.NeedsRex(), false, src.NeedsRex());
+}
+
+void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand &operand) {
+ uint8_t rex = operand.rex();
+ // For dst, SPL, BPL, SIL, DIL need the rex prefix.
+ bool force = dst.AsRegister() > 3;
+ if (force) {
+ rex |= 0x40; // REX.0000
+ }
+ if (dst.NeedsRex()) {
+ rex |= 0x44; // REX.0R00
+ }
+ if (rex != 0) {
+ EmitUint8(rex);
+ }
+}
+
+// Flushes the accumulated 32-bit constant pool into the instruction buffer,
+// one int32 word at a time.
+// NOTE(review): the template arguments on ArrayRef appear to have been stripped
+// by extraction (likely ArrayRef<const int32_t>) — restore from upstream ART.
+void X86_64Assembler::AddConstantArea() {
+ ArrayRef area = constant_area_.GetBuffer();
+ for (size_t i = 0, e = area.size(); i < e; i++) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitInt32(area[i]);
+ }
+}
+
+// Unconditionally appends a literal; returns its byte offset within the area.
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+// Deduplicating add: reuses the offset of an existing equal literal when possible.
+size_t ConstantArea::AddInt32(int32_t v) {
+ // Look for an existing match.
+ for (size_t i = 0, e = buffer_.size(); i < e; i++) {
+ if (v == buffer_[i]) {
+ return i * elem_size_;
+ }
+ }
+
+ // Didn't match anything.
+ return AppendInt32(v);
+}
+
+// 64-bit literals are stored as two consecutive int32 words (low word first);
+// dedup searches for an adjacent (low, high) pair.
+size_t ConstantArea::AddInt64(int64_t v) {
+ int32_t v_low = v;
+ int32_t v_high = v >> 32;
+ if (buffer_.size() > 1) {
+ // Ensure we don't pass the end of the buffer.
+ for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) {
+ if (v_low == buffer_[i] && v_high == buffer_[i + 1]) {
+ return i * elem_size_;
+ }
+ }
+ }
+
+ // Didn't match anything.
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v_low);
+ buffer_.push_back(v_high);
+ return result;
+}
+
+// NOTE(review): bit_cast below has lost its template arguments in extraction
+// (likely bit_cast<int64_t, double> / bit_cast<int32_t, float>) — confirm upstream.
+size_t ConstantArea::AddDouble(double v) {
+ // Treat the value as a 64-bit integer value.
+ return AddInt64(bit_cast(v));
+}
+
+size_t ConstantArea::AddFloat(float v) {
+ // Treat the value as a 32-bit integer value.
+ return AddInt32(bit_cast(v));
+}
+
+} // namespace x86_64
+} // namespace whale
diff --git a/module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.h b/module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.h
new file mode 100644
index 00000000..c1c294c5
--- /dev/null
+++ b/module/src/main/cpp/whale/src/assembler/x86_64/assembler_x86_64.h
@@ -0,0 +1,1276 @@
+#ifndef WHALE_ASSEMBLER_X86_64_ASSEMBLER_X86_64_H_
+#define WHALE_ASSEMBLER_X86_64_ASSEMBLER_X86_64_H_
+
+// NOTE(review): the <...> targets of the following system #include lines were
+// lost during extraction — restore (e.g. <cstdint>, <cstring>, <vector>, <ostream>)
+// from the upstream file before building.
+#include
+#include
+#include
+#include
+#include
+#include "assembler/value_object.h"
+#include "base/bit_utils.h"
+#include "assembler/x86_64/registers_x86_64.h"
+#include "assembler/x86_64/constants_x86_64.h"
+#include "base/logging.h"
+#include "assembler/assembler.h"
+#include "base/offsets.h"
+#include "managed_register_x86_64.h"
+
+// If true, references within the heap are poisoned (negated).
+#ifdef USE_HEAP_POISONING
+static constexpr bool kPoisonHeapReferences = true;
+#else
+static constexpr bool kPoisonHeapReferences = false;
+#endif
+
+namespace whale {
+namespace x86_64 {
+
+
+// Encodes an immediate value for operands.
+//
+// Note: Immediates can be 64b on x86-64 for certain instructions, but are often restricted
+// to 32b.
+//
+// Note: As we support cross-compilation, the value type must be int64_t. Please be aware of
+// conversion rules in expressions regarding negation, especially size_t on 32b.
+class Immediate : public ValueObject {
+ public:
+ explicit Immediate(int64_t value_in) : value_(value_in) {}
+
+ int64_t value() const { return value_; }
+
+ // Width predicates used by the encoders to select the shortest immediate form.
+ bool is_int8() const { return IsInt<8>(value_); }
+
+ bool is_uint8() const { return IsUint<8>(value_); }
+
+ bool is_int16() const { return IsInt<16>(value_); }
+
+ bool is_uint16() const { return IsUint<16>(value_); }
+
+ bool is_int32() const { return IsInt<32>(value_); }
+
+ private:
+ const int64_t value_;
+};
+
+
+// A ModRM/SIB operand under construction: up to 6 encoding bytes (ModRM,
+// optional SIB, optional disp8/disp32) plus the REX bits implied by any
+// extended registers used (accumulated in rex_).
+// NOTE(review): the template arguments of the static_cast calls below appear
+// stripped by extraction (e.g. static_cast<Register>, static_cast<ScaleFactor>,
+// static_cast<CpuRegister>, static_cast<int8_t>) — restore from upstream ART.
+class Operand : public ValueObject {
+ public:
+ // ModRM.mod field (bits 7-6 of the first encoding byte).
+ uint8_t mod() const {
+ return (encoding_at(0) >> 6) & 3;
+ }
+
+ // ModRM.rm field (low 3 bits, without the REX.B extension).
+ Register rm() const {
+ return static_cast(encoding_at(0) & 7);
+ }
+
+ ScaleFactor scale() const {
+ return static_cast((encoding_at(1) >> 6) & 3);
+ }
+
+ Register index() const {
+ return static_cast((encoding_at(1) >> 3) & 7);
+ }
+
+ Register base() const {
+ return static_cast(encoding_at(1) & 7);
+ }
+
+ // Full register numbers: the stored REX bits promote rm/index/base into R8-R15.
+ CpuRegister cpu_rm() const {
+ int ext = (rex_ & 1) != 0 ? x86_64::R8 : x86_64::RAX;
+ return static_cast(rm() + ext);
+ }
+
+ CpuRegister cpu_index() const {
+ int ext = (rex_ & 2) != 0 ? x86_64::R8 : x86_64::RAX;
+ return static_cast(index() + ext);
+ }
+
+ CpuRegister cpu_base() const {
+ int ext = (rex_ & 1) != 0 ? x86_64::R8 : x86_64::RAX;
+ return static_cast(base() + ext);
+ }
+
+ uint8_t rex() const {
+ return rex_;
+ }
+
+ // Displacement accessors; the CHECKs require the encoding to actually carry one.
+ int8_t disp8() const {
+ CHECK_GE(length_, 2);
+ return static_cast(encoding_[length_ - 1]);
+ }
+
+ int32_t disp32() const {
+ CHECK_GE(length_, 5);
+ int32_t value;
+ memcpy(&value, &encoding_[length_ - 4], sizeof(value));
+ return value;
+ }
+
+ bool IsRegister(CpuRegister reg) const {
+ return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only.
+ && ((encoding_[0] & 0x07) == reg.LowBits()) // Register codes match.
+ && (reg.NeedsRex() == ((rex_ & 1) != 0)); // REX.000B bits match.
+ }
+
+ AssemblerFixup *GetFixup() const {
+ return fixup_;
+ }
+
+ protected:
+ // Operand can be sub classed (e.g: Address).
+ Operand() : rex_(0), length_(0), fixup_(nullptr) {}
+
+ // Writes the ModRM byte and records REX.B if rm is an extended register.
+ void SetModRM(uint8_t mod_in, CpuRegister rm_in) {
+ CHECK_EQ(mod_in & ~3, 0);
+ if (rm_in.NeedsRex()) {
+ rex_ |= 0x41; // REX.000B
+ }
+ encoding_[0] = (mod_in << 6) | rm_in.LowBits();
+ length_ = 1;
+ }
+
+ // Writes the SIB byte (must directly follow ModRM) and records REX.B/REX.X.
+ void SetSIB(ScaleFactor scale_in, CpuRegister index_in, CpuRegister base_in) {
+ CHECK_EQ(length_, 1);
+ CHECK_EQ(scale_in & ~3, 0);
+ if (base_in.NeedsRex()) {
+ rex_ |= 0x41; // REX.000B
+ }
+ if (index_in.NeedsRex()) {
+ rex_ |= 0x42; // REX.00X0
+ }
+ encoding_[1] = (scale_in << 6) | (static_cast(index_in.LowBits()) << 3) |
+ static_cast(base_in.LowBits());
+ length_ = 2;
+ }
+
+ void SetDisp8(int8_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ encoding_[length_++] = static_cast(disp);
+ }
+
+ void SetDisp32(int32_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ int disp_size = sizeof(disp);
+ memmove(&encoding_[length_], &disp, disp_size);
+ length_ += disp_size;
+ }
+
+ void SetFixup(AssemblerFixup *fixup) {
+ fixup_ = fixup;
+ }
+
+ private:
+ uint8_t rex_;
+ uint8_t length_;
+ uint8_t encoding_[6];
+ AssemblerFixup *fixup_;
+
+ // Register-direct operand (ModRM.mod == 3); used by X86_64Assembler only.
+ explicit Operand(CpuRegister reg) : rex_(0), length_(0), fixup_(nullptr) { SetModRM(3, reg); }
+
+ // Get the operand encoding byte at the given index.
+ uint8_t encoding_at(int index_in) const {
+ CHECK_GE(index_in, 0);
+ CHECK_LT(index_in, length_);
+ return encoding_[index_in];
+ }
+
+ friend class X86_64Assembler;
+};
+
+
+// An Operand bound to a memory address (base/index/scale/displacement).
+// Init picks the shortest displacement encoding (none, disp8, disp32) and
+// inserts a SIB byte whenever the base's low bits equal RSP, as the plain
+// ModRM rm == RSP slot is reserved for SIB addressing.
+class Address : public Operand {
+ public:
+ Address(CpuRegister base_in, int32_t disp) {
+ Init(base_in, disp);
+ }
+
+ Address(CpuRegister base_in, Offset disp) {
+ Init(base_in, disp.Int32Value());
+ }
+
+ Address(CpuRegister base_in, FrameOffset disp) {
+ CHECK_EQ(base_in.AsRegister(), RSP);
+ Init(CpuRegister(RSP), disp.Int32Value());
+ }
+
+ Address(CpuRegister base_in, MemberOffset disp) {
+ Init(base_in, disp.Int32Value());
+ }
+
+ void Init(CpuRegister base_in, int32_t disp) {
+ // disp == 0 with base RBP/R13 still needs an explicit disp8, since
+ // mod == 0 with rm == RBP means RIP-relative/absolute instead.
+ if (disp == 0 && base_in.LowBits() != RBP) {
+ SetModRM(0, base_in);
+ if (base_in.LowBits() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
+ }
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, base_in);
+ if (base_in.LowBits() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
+ }
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, base_in);
+ if (base_in.LowBits() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
+ }
+ SetDisp32(disp);
+ }
+ }
+
+
+ // Index*scale + disp32, no base (SIB base RBP with mod == 0 means "none").
+ Address(CpuRegister index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode.
+ SetModRM(0, CpuRegister(RSP));
+ SetSIB(scale_in, index_in, CpuRegister(RBP));
+ SetDisp32(disp);
+ }
+
+ Address(CpuRegister base_in, CpuRegister index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode.
+ if (disp == 0 && base_in.LowBits() != RBP) {
+ SetModRM(0, CpuRegister(RSP));
+ SetSIB(scale_in, index_in, base_in);
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, CpuRegister(RSP));
+ SetSIB(scale_in, index_in, base_in);
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, CpuRegister(RSP));
+ SetSIB(scale_in, index_in, base_in);
+ SetDisp32(disp);
+ }
+ }
+
+ // If no_rip is true then the Absolute address isn't RIP relative.
+ static Address Absolute(uintptr_t addr, bool no_rip = false) {
+ Address result;
+ if (no_rip) {
+ result.SetModRM(0, CpuRegister(RSP));
+ result.SetSIB(TIMES_1, CpuRegister(RSP), CpuRegister(RBP));
+ result.SetDisp32(addr);
+ } else {
+ // RIP addressing is done using RBP as the base register.
+ // The value in RBP isn't used. Instead the offset is added to RIP.
+ result.SetModRM(0, CpuRegister(RBP));
+ result.SetDisp32(addr);
+ }
+ return result;
+ }
+
+ // An RIP relative address that will be fixed up later.
+ static Address RIP(AssemblerFixup *fixup) {
+ Address result;
+ // RIP addressing is done using RBP as the base register.
+ // The value in RBP isn't used. Instead the offset is added to RIP.
+ result.SetModRM(0, CpuRegister(RBP));
+ result.SetDisp32(0);
+ result.SetFixup(fixup);
+ return result;
+ }
+
+ // If no_rip is true then the Absolute address isn't RIP relative.
+ static Address Absolute(ThreadOffset64 addr, bool no_rip = false) {
+ return Absolute(addr.Int32Value(), no_rip);
+ }
+
+ private:
+ Address() {}
+};
+
+std::ostream &operator<<(std::ostream &os, const Address &addr);
+
+/**
+ * Class to handle constant area values.
+ */
+class ConstantArea {
+ public:
+ explicit ConstantArea()
+ : buffer_() {}
+
+ // Add a double to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddDouble(double v);
+
+ // Add a float to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddFloat(float v);
+
+ // Add an int32_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt32(int32_t v);
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
+
+ // Add an int64_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt64(int64_t v); + + size_t GetSize() const { + return buffer_.size() * elem_size_; + } + + ArrayRef GetBuffer() const { + return ArrayRef(buffer_); + } + + private: + static constexpr size_t elem_size_ = sizeof(int32_t); + std::vector buffer_; +}; + + +// This is equivalent to the Label class, used in a slightly different context. We +// inherit the functionality of the Label class, but prevent unintended +// derived-to-base conversions by making the base class private. +class NearLabel : private Label { + public: + NearLabel() : Label() {} + + // Expose the Label routines that we need. + using Label::Position; + using Label::LinkPosition; + using Label::IsBound; + using Label::IsUnused; + using Label::IsLinked; + + private: + using Label::BindTo; + using Label::LinkTo; + + friend class x86_64::X86_64Assembler; + + DISALLOW_COPY_AND_ASSIGN(NearLabel); +}; + + +class X86_64Assembler final : public Assembler { + public: + explicit X86_64Assembler() + : Assembler(), constant_area_() {} + + virtual ~X86_64Assembler() {} + + /* + * Emit Machine Instructions. 
+ */ + void call(CpuRegister reg); + + void call(const Address &address); + + void call(Label *label); + + void pushq(CpuRegister reg); + + void pushq(const Address &address); + + void pushq(const Immediate &imm); + + void popq(CpuRegister reg); + + void popq(const Address &address); + + void movq(CpuRegister dst, const Immediate &src); + + void movl(CpuRegister dst, const Immediate &src); + + void movq(CpuRegister dst, CpuRegister src); + + void movl(CpuRegister dst, CpuRegister src); + + void movntl(const Address &dst, CpuRegister src); + + void movntq(const Address &dst, CpuRegister src); + + void movq(CpuRegister dst, const Address &src); + + void movl(CpuRegister dst, const Address &src); + + void movq(const Address &dst, CpuRegister src); + + void movq(const Address &dst, const Immediate &imm); + + void movl(const Address &dst, CpuRegister src); + + void movl(const Address &dst, const Immediate &imm); + + void cmov(Condition c, CpuRegister dst, CpuRegister src); // This is the 64b version. 
+ void cmov(Condition c, CpuRegister dst, CpuRegister src, bool is64bit); + + void cmov(Condition c, CpuRegister dst, const Address &src, bool is64bit); + + void movzxb(CpuRegister dst, CpuRegister src); + + void movzxb(CpuRegister dst, const Address &src); + + void movsxb(CpuRegister dst, CpuRegister src); + + void movsxb(CpuRegister dst, const Address &src); + + void movb(CpuRegister dst, const Address &src); + + void movb(const Address &dst, CpuRegister src); + + void movb(const Address &dst, const Immediate &imm); + + void movzxw(CpuRegister dst, CpuRegister src); + + void movzxw(CpuRegister dst, const Address &src); + + void movsxw(CpuRegister dst, CpuRegister src); + + void movsxw(CpuRegister dst, const Address &src); + + void movw(CpuRegister dst, const Address &src); + + void movw(const Address &dst, CpuRegister src); + + void movw(const Address &dst, const Immediate &imm); + + void leaq(CpuRegister dst, const Address &src); + + void leal(CpuRegister dst, const Address &src); + + void movaps(XmmRegister dst, XmmRegister src); // move + void movaps(XmmRegister dst, const Address &src); // load aligned + void movups(XmmRegister dst, const Address &src); // load unaligned + void movaps(const Address &dst, XmmRegister src); // store aligned + void movups(const Address &dst, XmmRegister src); // store unaligned + + void movss(XmmRegister dst, const Address &src); + + void movss(const Address &dst, XmmRegister src); + + void movss(XmmRegister dst, XmmRegister src); + + void movsxd(CpuRegister dst, CpuRegister src); + + void movsxd(CpuRegister dst, const Address &src); + + void movd(XmmRegister dst, CpuRegister src); // Note: this is the r64 version, formally movq. + void movd(CpuRegister dst, XmmRegister src); // Note: this is the r64 version, formally movq. 
+ void movd(XmmRegister dst, CpuRegister src, bool is64bit); + + void movd(CpuRegister dst, XmmRegister src, bool is64bit); + + void addss(XmmRegister dst, XmmRegister src); + + void addss(XmmRegister dst, const Address &src); + + void subss(XmmRegister dst, XmmRegister src); + + void subss(XmmRegister dst, const Address &src); + + void mulss(XmmRegister dst, XmmRegister src); + + void mulss(XmmRegister dst, const Address &src); + + void divss(XmmRegister dst, XmmRegister src); + + void divss(XmmRegister dst, const Address &src); + + void addps(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void subps(XmmRegister dst, XmmRegister src); + + void mulps(XmmRegister dst, XmmRegister src); + + void divps(XmmRegister dst, XmmRegister src); + + void movapd(XmmRegister dst, XmmRegister src); // move + void movapd(XmmRegister dst, const Address &src); // load aligned + void movupd(XmmRegister dst, const Address &src); // load unaligned + void movapd(const Address &dst, XmmRegister src); // store aligned + void movupd(const Address &dst, XmmRegister src); // store unaligned + + void movsd(XmmRegister dst, const Address &src); + + void movsd(const Address &dst, XmmRegister src); + + void movsd(XmmRegister dst, XmmRegister src); + + void addsd(XmmRegister dst, XmmRegister src); + + void addsd(XmmRegister dst, const Address &src); + + void subsd(XmmRegister dst, XmmRegister src); + + void subsd(XmmRegister dst, const Address &src); + + void mulsd(XmmRegister dst, XmmRegister src); + + void mulsd(XmmRegister dst, const Address &src); + + void divsd(XmmRegister dst, XmmRegister src); + + void divsd(XmmRegister dst, const Address &src); + + void addpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void subpd(XmmRegister dst, XmmRegister src); + + void mulpd(XmmRegister dst, XmmRegister src); + + void divpd(XmmRegister dst, XmmRegister src); + + void movdqa(XmmRegister dst, XmmRegister src); // move + void movdqa(XmmRegister dst, const Address 
&src); // load aligned + void movdqu(XmmRegister dst, const Address &src); // load unaligned + void movdqa(const Address &dst, XmmRegister src); // store aligned + void movdqu(const Address &dst, XmmRegister src); // store unaligned + + void paddb(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void psubb(XmmRegister dst, XmmRegister src); + + void paddw(XmmRegister dst, XmmRegister src); + + void psubw(XmmRegister dst, XmmRegister src); + + void pmullw(XmmRegister dst, XmmRegister src); + + void paddd(XmmRegister dst, XmmRegister src); + + void psubd(XmmRegister dst, XmmRegister src); + + void pmulld(XmmRegister dst, XmmRegister src); + + void paddq(XmmRegister dst, XmmRegister src); + + void psubq(XmmRegister dst, XmmRegister src); + + void paddusb(XmmRegister dst, XmmRegister src); + + void paddsb(XmmRegister dst, XmmRegister src); + + void paddusw(XmmRegister dst, XmmRegister src); + + void paddsw(XmmRegister dst, XmmRegister src); + + void psubusb(XmmRegister dst, XmmRegister src); + + void psubsb(XmmRegister dst, XmmRegister src); + + void psubusw(XmmRegister dst, XmmRegister src); + + void psubsw(XmmRegister dst, XmmRegister src); + + void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version. + void cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit); + + void cvtsi2ss(XmmRegister dst, const Address &src, bool is64bit); + + void cvtsi2sd(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version. + void cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit); + + void cvtsi2sd(XmmRegister dst, const Address &src, bool is64bit); + + void cvtss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version. + void cvtss2sd(XmmRegister dst, XmmRegister src); + + void cvtss2sd(XmmRegister dst, const Address &src); + + void cvtsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version. 
+ void cvtsd2ss(XmmRegister dst, XmmRegister src); + + void cvtsd2ss(XmmRegister dst, const Address &src); + + void cvttss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version. + void cvttss2si(CpuRegister dst, XmmRegister src, bool is64bit); + + void cvttsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version. + void cvttsd2si(CpuRegister dst, XmmRegister src, bool is64bit); + + void cvtdq2ps(XmmRegister dst, XmmRegister src); + + void cvtdq2pd(XmmRegister dst, XmmRegister src); + + void comiss(XmmRegister a, XmmRegister b); + + void comiss(XmmRegister a, const Address &b); + + void comisd(XmmRegister a, XmmRegister b); + + void comisd(XmmRegister a, const Address &b); + + void ucomiss(XmmRegister a, XmmRegister b); + + void ucomiss(XmmRegister a, const Address &b); + + void ucomisd(XmmRegister a, XmmRegister b); + + void ucomisd(XmmRegister a, const Address &b); + + void roundsd(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void roundss(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void sqrtsd(XmmRegister dst, XmmRegister src); + + void sqrtss(XmmRegister dst, XmmRegister src); + + void xorpd(XmmRegister dst, const Address &src); + + void xorpd(XmmRegister dst, XmmRegister src); + + void xorps(XmmRegister dst, const Address &src); + + void xorps(XmmRegister dst, XmmRegister src); + + void pxor(XmmRegister dst, XmmRegister src); // no addr variant (for now) + + void andpd(XmmRegister dst, const Address &src); + + void andpd(XmmRegister dst, XmmRegister src); + + void andps(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void pand(XmmRegister dst, XmmRegister src); + + void andn(CpuRegister dst, CpuRegister src1, CpuRegister src2); + + void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void andnps(XmmRegister dst, XmmRegister src); + + void pandn(XmmRegister dst, XmmRegister src); + + void orpd(XmmRegister dst, XmmRegister src); // no addr variant (for 
now) + void orps(XmmRegister dst, XmmRegister src); + + void por(XmmRegister dst, XmmRegister src); + + void pavgb(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void pavgw(XmmRegister dst, XmmRegister src); + + void psadbw(XmmRegister dst, XmmRegister src); + + void pmaddwd(XmmRegister dst, XmmRegister src); + + void phaddw(XmmRegister dst, XmmRegister src); + + void phaddd(XmmRegister dst, XmmRegister src); + + void haddps(XmmRegister dst, XmmRegister src); + + void haddpd(XmmRegister dst, XmmRegister src); + + void phsubw(XmmRegister dst, XmmRegister src); + + void phsubd(XmmRegister dst, XmmRegister src); + + void hsubps(XmmRegister dst, XmmRegister src); + + void hsubpd(XmmRegister dst, XmmRegister src); + + void pminsb(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void pmaxsb(XmmRegister dst, XmmRegister src); + + void pminsw(XmmRegister dst, XmmRegister src); + + void pmaxsw(XmmRegister dst, XmmRegister src); + + void pminsd(XmmRegister dst, XmmRegister src); + + void pmaxsd(XmmRegister dst, XmmRegister src); + + void pminub(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void pmaxub(XmmRegister dst, XmmRegister src); + + void pminuw(XmmRegister dst, XmmRegister src); + + void pmaxuw(XmmRegister dst, XmmRegister src); + + void pminud(XmmRegister dst, XmmRegister src); + + void pmaxud(XmmRegister dst, XmmRegister src); + + void minps(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void maxps(XmmRegister dst, XmmRegister src); + + void minpd(XmmRegister dst, XmmRegister src); + + void maxpd(XmmRegister dst, XmmRegister src); + + void pcmpeqb(XmmRegister dst, XmmRegister src); + + void pcmpeqw(XmmRegister dst, XmmRegister src); + + void pcmpeqd(XmmRegister dst, XmmRegister src); + + void pcmpeqq(XmmRegister dst, XmmRegister src); + + void pcmpgtb(XmmRegister dst, XmmRegister src); + + void pcmpgtw(XmmRegister dst, XmmRegister src); + + void pcmpgtd(XmmRegister dst, XmmRegister src); + + 
void pcmpgtq(XmmRegister dst, XmmRegister src); // SSE4.2 + + void shufpd(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void shufps(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void pshufd(XmmRegister dst, XmmRegister src, const Immediate &imm); + + void punpcklbw(XmmRegister dst, XmmRegister src); + + void punpcklwd(XmmRegister dst, XmmRegister src); + + void punpckldq(XmmRegister dst, XmmRegister src); + + void punpcklqdq(XmmRegister dst, XmmRegister src); + + void punpckhbw(XmmRegister dst, XmmRegister src); + + void punpckhwd(XmmRegister dst, XmmRegister src); + + void punpckhdq(XmmRegister dst, XmmRegister src); + + void punpckhqdq(XmmRegister dst, XmmRegister src); + + void psllw(XmmRegister reg, const Immediate &shift_count); + + void pslld(XmmRegister reg, const Immediate &shift_count); + + void psllq(XmmRegister reg, const Immediate &shift_count); + + void psraw(XmmRegister reg, const Immediate &shift_count); + + void psrad(XmmRegister reg, const Immediate &shift_count); + // no psraq + + void psrlw(XmmRegister reg, const Immediate &shift_count); + + void psrld(XmmRegister reg, const Immediate &shift_count); + + void psrlq(XmmRegister reg, const Immediate &shift_count); + + void psrldq(XmmRegister reg, const Immediate &shift_count); + + void flds(const Address &src); + + void fstps(const Address &dst); + + void fsts(const Address &dst); + + void fldl(const Address &src); + + void fstpl(const Address &dst); + + void fstl(const Address &dst); + + void fstsw(); + + void fucompp(); + + void fnstcw(const Address &dst); + + void fldcw(const Address &src); + + void fistpl(const Address &dst); + + void fistps(const Address &dst); + + void fildl(const Address &src); + + void filds(const Address &src); + + void fincstp(); + + void ffree(const Immediate &index); + + void fsin(); + + void fcos(); + + void fptan(); + + void fprem(); + + void xchgl(CpuRegister dst, CpuRegister src); + + void xchgq(CpuRegister dst, CpuRegister src); + + 
void xchgl(CpuRegister reg, const Address &address); + + void cmpb(const Address &address, const Immediate &imm); + + void cmpw(const Address &address, const Immediate &imm); + + void cmpl(CpuRegister reg, const Immediate &imm); + + void cmpl(CpuRegister reg0, CpuRegister reg1); + + void cmpl(CpuRegister reg, const Address &address); + + void cmpl(const Address &address, CpuRegister reg); + + void cmpl(const Address &address, const Immediate &imm); + + void cmpq(CpuRegister reg0, CpuRegister reg1); + + void cmpq(CpuRegister reg0, const Immediate &imm); + + void cmpq(CpuRegister reg0, const Address &address); + + void cmpq(const Address &address, const Immediate &imm); + + void testl(CpuRegister reg1, CpuRegister reg2); + + void testl(CpuRegister reg, const Address &address); + + void testl(CpuRegister reg, const Immediate &imm); + + void testq(CpuRegister reg1, CpuRegister reg2); + + void testq(CpuRegister reg, const Address &address); + + void testb(const Address &address, const Immediate &imm); + + void testl(const Address &address, const Immediate &imm); + + void andl(CpuRegister dst, const Immediate &imm); + + void andl(CpuRegister dst, CpuRegister src); + + void andl(CpuRegister reg, const Address &address); + + void andq(CpuRegister dst, const Immediate &imm); + + void andq(CpuRegister dst, CpuRegister src); + + void andq(CpuRegister reg, const Address &address); + + void orl(CpuRegister dst, const Immediate &imm); + + void orl(CpuRegister dst, CpuRegister src); + + void orl(CpuRegister reg, const Address &address); + + void orq(CpuRegister dst, CpuRegister src); + + void orq(CpuRegister dst, const Immediate &imm); + + void orq(CpuRegister reg, const Address &address); + + void xorl(CpuRegister dst, CpuRegister src); + + void xorl(CpuRegister dst, const Immediate &imm); + + void xorl(CpuRegister reg, const Address &address); + + void xorq(CpuRegister dst, const Immediate &imm); + + void xorq(CpuRegister dst, CpuRegister src); + + void xorq(CpuRegister reg, 
const Address &address); + + void addl(CpuRegister dst, CpuRegister src); + + void addl(CpuRegister reg, const Immediate &imm); + + void addl(CpuRegister reg, const Address &address); + + void addl(const Address &address, CpuRegister reg); + + void addl(const Address &address, const Immediate &imm); + + void addw(const Address &address, const Immediate &imm); + + void addq(CpuRegister reg, const Immediate &imm); + + void addq(CpuRegister dst, CpuRegister src); + + void addq(CpuRegister dst, const Address &address); + + void subl(CpuRegister dst, CpuRegister src); + + void subl(CpuRegister reg, const Immediate &imm); + + void subl(CpuRegister reg, const Address &address); + + void subq(CpuRegister reg, const Immediate &imm); + + void subq(CpuRegister dst, CpuRegister src); + + void subq(CpuRegister dst, const Address &address); + + void cdq(); + + void cqo(); + + void idivl(CpuRegister reg); + + void idivq(CpuRegister reg); + + void imull(CpuRegister dst, CpuRegister src); + + void imull(CpuRegister reg, const Immediate &imm); + + void imull(CpuRegister dst, CpuRegister src, const Immediate &imm); + + void imull(CpuRegister reg, const Address &address); + + void imulq(CpuRegister src); + + void imulq(CpuRegister dst, CpuRegister src); + + void imulq(CpuRegister reg, const Immediate &imm); + + void imulq(CpuRegister reg, const Address &address); + + void imulq(CpuRegister dst, CpuRegister reg, const Immediate &imm); + + void imull(CpuRegister reg); + + void imull(const Address &address); + + void mull(CpuRegister reg); + + void mull(const Address &address); + + void shll(CpuRegister reg, const Immediate &imm); + + void shll(CpuRegister operand, CpuRegister shifter); + + void shrl(CpuRegister reg, const Immediate &imm); + + void shrl(CpuRegister operand, CpuRegister shifter); + + void sarl(CpuRegister reg, const Immediate &imm); + + void sarl(CpuRegister operand, CpuRegister shifter); + + void shlq(CpuRegister reg, const Immediate &imm); + + void shlq(CpuRegister 
operand, CpuRegister shifter); + + void shrq(CpuRegister reg, const Immediate &imm); + + void shrq(CpuRegister operand, CpuRegister shifter); + + void sarq(CpuRegister reg, const Immediate &imm); + + void sarq(CpuRegister operand, CpuRegister shifter); + + void negl(CpuRegister reg); + + void negq(CpuRegister reg); + + void notl(CpuRegister reg); + + void notq(CpuRegister reg); + + void enter(const Immediate &imm); + + void leave(); + + void ret(); + + void ret(const Immediate &imm); + + void nop(); + + void int3(); + + void hlt(); + + void j(Condition condition, Label *label); + + void j(Condition condition, NearLabel *label); + + void jrcxz(NearLabel *label); + + void jmp(CpuRegister reg); + + void jmp(const Address &address); + + void jmp(Label *label); + + void jmp(NearLabel *label); + + X86_64Assembler *lock(); + + void cmpxchgl(const Address &address, CpuRegister reg); + + void cmpxchgq(const Address &address, CpuRegister reg); + + void mfence(); + + X86_64Assembler *gs(); + + void setcc(Condition condition, CpuRegister dst); + + void bswapl(CpuRegister dst); + + void bswapq(CpuRegister dst); + + void bsfl(CpuRegister dst, CpuRegister src); + + void bsfl(CpuRegister dst, const Address &src); + + void bsfq(CpuRegister dst, CpuRegister src); + + void bsfq(CpuRegister dst, const Address &src); + + void blsi(CpuRegister dst, CpuRegister src); // no addr variant (for now) + void blsmsk(CpuRegister dst, CpuRegister src); // no addr variant (for now) + void blsr(CpuRegister dst, CpuRegister src); // no addr variant (for now) + + void bsrl(CpuRegister dst, CpuRegister src); + + void bsrl(CpuRegister dst, const Address &src); + + void bsrq(CpuRegister dst, CpuRegister src); + + void bsrq(CpuRegister dst, const Address &src); + + void popcntl(CpuRegister dst, CpuRegister src); + + void popcntl(CpuRegister dst, const Address &src); + + void popcntq(CpuRegister dst, CpuRegister src); + + void popcntq(CpuRegister dst, const Address &src); + + void rorl(CpuRegister reg, 
const Immediate &imm); + + void rorl(CpuRegister operand, CpuRegister shifter); + + void roll(CpuRegister reg, const Immediate &imm); + + void roll(CpuRegister operand, CpuRegister shifter); + + void rorq(CpuRegister reg, const Immediate &imm); + + void rorq(CpuRegister operand, CpuRegister shifter); + + void rolq(CpuRegister reg, const Immediate &imm); + + void rolq(CpuRegister operand, CpuRegister shifter); + + void repne_scasb(); + + void repne_scasw(); + + void repe_cmpsw(); + + void repe_cmpsl(); + + void repe_cmpsq(); + + void rep_movsw(); + + // + // Macros for High-level operations. + // + + void AddImmediate(CpuRegister reg, const Immediate &imm); + + void LoadDoubleConstant(XmmRegister dst, double value); + + void LockCmpxchgl(const Address &address, CpuRegister reg) { + lock()->cmpxchgl(address, reg); + } + + void LockCmpxchgq(const Address &address, CpuRegister reg) { + lock()->cmpxchgq(address, reg); + } + + // + // Misc. functionality + // + int PreferredLoopAlignment() { return 16; } + + void Align(int alignment, int offset); + + void Bind(Label *label) override; + + void Jump(Label *label) override { + jmp(label); + } + + void Bind(NearLabel *label); + + // Add a double to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddDouble(double v) { return constant_area_.AddDouble(v); } + + // Add a float to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddFloat(float v) { return constant_area_.AddFloat(v); } + + // Add an int32_t to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddInt32(int32_t v) { + return constant_area_.AddInt32(v); + } + + // Add an int32_t to the end of the constant area, returning the offset into + // the constant area where the literal resides. 
+ size_t AppendInt32(int32_t v) { + return constant_area_.AppendInt32(v); + } + + // Add an int64_t to the constant area, returning the offset into + // the constant area where the literal resides. + size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); } + + // Add the contents of the constant area to the assembler buffer. + void AddConstantArea(); + + // Is the constant area empty? Return true if there are no literals in the constant area. + bool IsConstantAreaEmpty() const { return constant_area_.GetSize() == 0; } + + // Return the current size of the constant area. + size_t ConstantAreaSize() const { return constant_area_.GetSize(); } + + // + // Heap poisoning. + // + + // Poison a heap reference contained in `reg`. + void PoisonHeapReference(CpuRegister reg) { negl(reg); } + + // Unpoison a heap reference contained in `reg`. + void UnpoisonHeapReference(CpuRegister reg) { negl(reg); } + + // Poison a heap reference contained in `reg` if heap poisoning is enabled. + void MaybePoisonHeapReference(CpuRegister reg) { + if (kPoisonHeapReferences) { + PoisonHeapReference(reg); + } + } + + // Unpoison a heap reference contained in `reg` if heap poisoning is enabled. 
+ void MaybeUnpoisonHeapReference(CpuRegister reg) { + if (kPoisonHeapReferences) { + UnpoisonHeapReference(reg); + } + } + + private: + void EmitUint8(uint8_t value); + + void EmitInt32(int32_t value); + + void EmitInt64(int64_t value); + + void EmitRegisterOperand(uint8_t rm, uint8_t reg); + + void EmitXmmRegisterOperand(uint8_t rm, XmmRegister reg); + + void EmitFixup(AssemblerFixup *fixup); + + void EmitOperandSizeOverride(); + + void EmitOperand(uint8_t rm, const Operand &operand); + + void EmitImmediate(const Immediate &imm, bool is_16_op = false); + + void EmitComplex( + uint8_t rm, const Operand &operand, const Immediate &immediate, bool is_16_op = false); + + void EmitLabel(Label *label, int instruction_size); + + void EmitLabelLink(Label *label); + + void EmitLabelLink(NearLabel *label); + + void EmitGenericShift(bool wide, int rm, CpuRegister reg, const Immediate &imm); + + void EmitGenericShift(bool wide, int rm, CpuRegister operand, CpuRegister shifter); + + // If any input is not false, output the necessary rex prefix. + void EmitOptionalRex(bool force, bool w, bool r, bool x, bool b); + + // Emit a rex prefix byte if necessary for reg. ie if reg is a register in the range R8 to R15. + void EmitOptionalRex32(CpuRegister reg); + + void EmitOptionalRex32(CpuRegister dst, CpuRegister src); + + void EmitOptionalRex32(XmmRegister dst, XmmRegister src); + + void EmitOptionalRex32(CpuRegister dst, XmmRegister src); + + void EmitOptionalRex32(XmmRegister dst, CpuRegister src); + + void EmitOptionalRex32(const Operand &operand); + + void EmitOptionalRex32(CpuRegister dst, const Operand &operand); + + void EmitOptionalRex32(XmmRegister dst, const Operand &operand); + + // Emit a REX.W prefix plus necessary register bit encodings. 
+ void EmitRex64(); + + void EmitRex64(CpuRegister reg); + + void EmitRex64(const Operand &operand); + + void EmitRex64(CpuRegister dst, CpuRegister src); + + void EmitRex64(CpuRegister dst, const Operand &operand); + + void EmitRex64(XmmRegister dst, const Operand &operand); + + void EmitRex64(XmmRegister dst, CpuRegister src); + + void EmitRex64(CpuRegister dst, XmmRegister src); + + // Emit a REX prefix to normalize byte registers plus necessary register bit encodings. + void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src); + + void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand &operand); + + // Emit a 3 byte VEX Prefix + uint8_t EmitVexByteZero(bool is_two_byte); + + uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm); + + uint8_t EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp); + + ConstantArea constant_area_; + + DISALLOW_COPY_AND_ASSIGN(X86_64Assembler); +}; + +inline void X86_64Assembler::EmitUint8(uint8_t value) { + buffer_.Emit(value); +} + +inline void X86_64Assembler::EmitInt32(int32_t value) { + buffer_.Emit(value); +} + +inline void X86_64Assembler::EmitInt64(int64_t value) { + // Write this 64-bit value as two 32-bit words for alignment reasons + // (this is essentially when running on ARM, which does not allow + // 64-bit unaligned accesses). We assume little-endianness here. 
+ EmitInt32(Low32Bits(value)); + EmitInt32(High32Bits(value)); +} + +inline void X86_64Assembler::EmitRegisterOperand(uint8_t rm, uint8_t reg) { + CHECK_GE(rm, 0); + CHECK_LT(rm, 8); + buffer_.Emit((0xC0 | (reg & 7)) + (rm << 3)); +} + +inline void X86_64Assembler::EmitXmmRegisterOperand(uint8_t rm, XmmRegister reg) { + EmitRegisterOperand(rm, static_cast(reg.AsFloatRegister())); +} + +inline void X86_64Assembler::EmitFixup(AssemblerFixup *fixup) { + buffer_.EmitFixup(fixup); +} + +inline void X86_64Assembler::EmitOperandSizeOverride() { + EmitUint8(0x66); +} + + +} // namespace x86_64 +} // namespace whale + +#endif // WHALE_ASSEMBLER_X86_64_ASSEMBLER_X86_64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/x86_64/constants_x86_64.h b/module/src/main/cpp/whale/src/assembler/x86_64/constants_x86_64.h new file mode 100644 index 00000000..10c1220a --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86_64/constants_x86_64.h @@ -0,0 +1,126 @@ +#ifndef WHALE_ASSEMBLER_X86_64_CONSTANTS_X86_64_H_ +#define WHALE_ASSEMBLER_X86_64_CONSTANTS_X86_64_H_ + +#include +#include "assembler/x86_64/registers_x86_64.h" +#include "base/macros.h" + +namespace whale { +namespace x86_64 { + +class CpuRegister { + public: + explicit constexpr CpuRegister(Register r) : reg_(r) {} + + constexpr CpuRegister(int r) : reg_(Register(r)) {} + + constexpr Register AsRegister() const { + return reg_; + } + + constexpr uint8_t LowBits() const { + return reg_ & 7; + } + + constexpr bool NeedsRex() const { + return reg_ > 7; + } + + private: + const Register reg_; +}; + +class XmmRegister { + public: + explicit constexpr XmmRegister(FloatRegister r) : reg_(r) {} + + constexpr XmmRegister(int r) : reg_(FloatRegister(r)) {} + + constexpr FloatRegister AsFloatRegister() const { + return reg_; + } + + constexpr uint8_t LowBits() const { + return reg_ & 7; + } + + constexpr bool NeedsRex() const { + return reg_ > 7; + } + + private: + const FloatRegister reg_; +}; + +enum X87Register { + ST0 
= 0, + ST1 = 1, + ST2 = 2, + ST3 = 3, + ST4 = 4, + ST5 = 5, + ST6 = 6, + ST7 = 7, + kNumberOfX87Registers = 8, + kNoX87Register = -1 // Signals an illegal register. +}; + +enum ScaleFactor { + TIMES_1 = 0, + TIMES_2 = 1, + TIMES_4 = 2, + TIMES_8 = 3 +}; + +enum Condition { + kOverflow = 0, + kNoOverflow = 1, + kBelow = 2, + kAboveEqual = 3, + kEqual = 4, + kNotEqual = 5, + kBelowEqual = 6, + kAbove = 7, + kSign = 8, + kNotSign = 9, + kParityEven = 10, + kParityOdd = 11, + kLess = 12, + kGreaterEqual = 13, + kLessEqual = 14, + kGreater = 15, + + kZero = kEqual, + kNotZero = kNotEqual, + kNegative = kSign, + kPositive = kNotSign, + kCarrySet = kBelow, + kCarryClear = kAboveEqual, + kUnordered = kParityEven +}; + + +class Instr { + public: + static const uint8_t kHltInstruction = 0xF4; + // We prefer not to use the int3 instruction since it conflicts with gdb. + static const uint8_t kBreakPointInstruction = kHltInstruction; + + bool IsBreakPoint() { + return (*reinterpret_cast(this)) == kBreakPointInstruction; + } + + // Instructions are read out of a code stream. The only way to get a + // reference to an instruction is to convert a pointer. There is no way + // to allocate or create instances of class Instr. + // Use the At(pc) function to create references to Instr. + static Instr *At(uintptr_t pc) { return reinterpret_cast(pc); } + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); +}; + +} // namespace x86_64 +} // namespace whale + +#endif // WHALE_ASSEMBLER_X86_64_CONSTANTS_X86_64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.cc b/module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.cc new file mode 100644 index 00000000..2a568d71 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.cc @@ -0,0 +1,66 @@ +#include "assembler/x86_64/managed_register_x86_64.h" + +namespace whale { +namespace x86_64 { + +// Define register pairs. 
+// This list must be kept in sync with the RegisterPair enum. +#define REGISTER_PAIR_LIST(P) \ + P(RAX, RDX) \ + P(RAX, RCX) \ + P(RAX, RBX) \ + P(RAX, RDI) \ + P(RDX, RCX) \ + P(RDX, RBX) \ + P(RDX, RDI) \ + P(RCX, RBX) \ + P(RCX, RDI) \ + P(RBX, RDI) + + +struct RegisterPairDescriptor { + RegisterPair reg; // Used to verify that the enum is in sync. + Register low; + Register high; +}; + + +static const RegisterPairDescriptor kRegisterPairs[] = { +#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high }, + REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION) +#undef REGISTER_PAIR_ENUMERATION +}; + + +bool X86_64ManagedRegister::Overlaps(const X86_64ManagedRegister &other) const { + if (IsNoRegister() || other.IsNoRegister()) return false; + if (Equals(other)) return true; + if (IsRegisterPair()) { + Register low = AsRegisterPairLow().AsRegister(); + Register high = AsRegisterPairHigh().AsRegister(); + return X86_64ManagedRegister::FromCpuRegister(low).Overlaps(other) || + X86_64ManagedRegister::FromCpuRegister(high).Overlaps(other); + } + if (other.IsRegisterPair()) { + return other.Overlaps(*this); + } + return false; +} + + +int X86_64ManagedRegister::AllocIdLow() const { + const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds); + return kRegisterPairs[r].low; +} + + +int X86_64ManagedRegister::AllocIdHigh() const { + const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds); + return kRegisterPairs[r].high; +} + + +} // namespace x86_64 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.h b/module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.h new file mode 100644 index 00000000..fa48da01 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86_64/managed_register_x86_64.h @@ -0,0 +1,172 @@ +#ifndef WHALE_ASSEMBLER_MANAGED_REGISTER_X86_64_H_ +#define WHALE_ASSEMBLER_MANAGED_REGISTER_X86_64_H_ + +#include 
"base/logging.h" +#include "assembler/x86_64/registers_x86_64.h" +#include "assembler/x86_64/constants_x86_64.h" +#include "assembler/managed_register.h" + +namespace whale { +namespace x86_64 { + + +// Values for register pairs. +// The registers in kReservedCpuRegistersArray in x86.cc are not used in pairs. +// The table kRegisterPairs in x86.cc must be kept in sync with this enum. +enum RegisterPair { + RAX_RDX = 0, + RAX_RCX = 1, + RAX_RBX = 2, + RAX_RDI = 3, + RDX_RCX = 4, + RDX_RBX = 5, + RDX_RDI = 6, + RCX_RBX = 7, + RCX_RDI = 8, + RBX_RDI = 9, + kNumberOfRegisterPairs = 10, + kNoRegisterPair = -1, +}; + +std::ostream &operator<<(std::ostream &os, const RegisterPair ®); + +const int kNumberOfCpuRegIds = kNumberOfCpuRegisters; +const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters; + +const int kNumberOfXmmRegIds = kNumberOfFloatRegisters; +const int kNumberOfXmmAllocIds = kNumberOfFloatRegisters; + +const int kNumberOfX87RegIds = kNumberOfX87Registers; +const int kNumberOfX87AllocIds = kNumberOfX87Registers; + +const int kNumberOfPairRegIds = kNumberOfRegisterPairs; + +const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds + kNumberOfPairRegIds; +const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds + + kNumberOfX87RegIds; + +// Register ids map: +// [0..R[ cpu registers (enum Register) +// [R..X[ xmm registers (enum XmmRegister) +// [X..S[ x87 registers (enum X87Register) +// [S..P[ register pairs (enum RegisterPair) +// where +// R = kNumberOfCpuRegIds +// X = R + kNumberOfXmmRegIds +// S = X + kNumberOfX87RegIds +// P = X + kNumberOfRegisterPairs + +// Allocation ids map: +// [0..R[ cpu registers (enum Register) +// [R..X[ xmm registers (enum XmmRegister) +// [X..S[ x87 registers (enum X87Register) +// where +// R = kNumberOfCpuRegIds +// X = R + kNumberOfXmmRegIds +// S = X + kNumberOfX87RegIds + + +// An instance of class 'ManagedRegister' represents a single cpu register (enum +// Register), an 
xmm register (enum XmmRegister), or a pair of cpu registers +// (enum RegisterPair). +// 'ManagedRegister::NoRegister()' provides an invalid register. +// There is a one-to-one mapping between ManagedRegister and register id. +class X86_64ManagedRegister : public ManagedRegister { + public: + constexpr CpuRegister AsCpuRegister() const { + return CpuRegister(static_cast(id_)); + } + + constexpr XmmRegister AsXmmRegister() const { + return XmmRegister(static_cast(id_ - kNumberOfCpuRegIds)); + } + + constexpr X87Register AsX87Register() const { + return static_cast(id_ - + (kNumberOfCpuRegIds + kNumberOfXmmRegIds)); + } + + constexpr CpuRegister AsRegisterPairLow() const { + // Appropriate mapping of register ids allows to use AllocIdLow(). + return FromRegId(AllocIdLow()).AsCpuRegister(); + } + + constexpr CpuRegister AsRegisterPairHigh() const { + // Appropriate mapping of register ids allows to use AllocIdHigh(). + return FromRegId(AllocIdHigh()).AsCpuRegister(); + } + + constexpr bool IsCpuRegister() const { + return (0 <= id_) && (id_ < kNumberOfCpuRegIds); + } + + constexpr bool IsXmmRegister() const { + const int test = id_ - kNumberOfCpuRegIds; + return (0 <= test) && (test < kNumberOfXmmRegIds); + } + + constexpr bool IsX87Register() const { + const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds); + return (0 <= test) && (test < kNumberOfX87RegIds); + } + + constexpr bool IsRegisterPair() const { + const int test = id_ - + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds); + return (0 <= test) && (test < kNumberOfPairRegIds); + } + + // Returns true if the two managed-registers ('this' and 'other') overlap. + // Either managed-register may be the NoRegister. If both are the NoRegister + // then false is returned. 
+ bool Overlaps(const X86_64ManagedRegister &other) const; + + static constexpr X86_64ManagedRegister FromCpuRegister(Register r) { + return FromRegId(r); + } + + static constexpr X86_64ManagedRegister FromXmmRegister(FloatRegister r) { + return FromRegId(r + kNumberOfCpuRegIds); + } + + static constexpr X86_64ManagedRegister FromX87Register(X87Register r) { + return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds); + } + + static constexpr X86_64ManagedRegister FromRegisterPair(RegisterPair r) { + return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds + + kNumberOfX87RegIds)); + } + + private: + constexpr bool IsValidManagedRegister() const { + return (0 <= id_) && (id_ < kNumberOfRegIds); + } + + constexpr int RegId() const { + return id_; + } + + int AllocId() const { + return id_; + } + + int AllocIdLow() const; + + int AllocIdHigh() const; + + friend class ManagedRegister; + + explicit constexpr X86_64ManagedRegister(int reg_id) : ManagedRegister(reg_id) {} + + static constexpr X86_64ManagedRegister FromRegId(int reg_id) { + X86_64ManagedRegister reg(reg_id); + return reg; + } +}; + +} // namespace x86_64 +} // namespace whale + +#endif // WHALE_ASSEMBLER_MANAGED_REGISTER_X86_64_H_ diff --git a/module/src/main/cpp/whale/src/assembler/x86_64/registers_x86_64.h b/module/src/main/cpp/whale/src/assembler/x86_64/registers_x86_64.h new file mode 100644 index 00000000..604f67e1 --- /dev/null +++ b/module/src/main/cpp/whale/src/assembler/x86_64/registers_x86_64.h @@ -0,0 +1,52 @@ +#ifndef WHALE_ASSEMBLER_REGISTERS_X86_64_H_ +#define WHALE_ASSEMBLER_REGISTERS_X86_64_H_ + +namespace whale { +namespace x86_64 { + +enum Register { + RAX = 0, + RCX = 1, + RDX = 2, + RBX = 3, + RSP = 4, + RBP = 5, + RSI = 6, + RDI = 7, + R8 = 8, + R9 = 9, + R10 = 10, + R11 = 11, + R12 = 12, + R13 = 13, + R14 = 14, + R15 = 15, + kLastCpuRegister = 15, + kNumberOfCpuRegisters = 16, + kNoRegister = -1 // Signals an illegal register. 
+}; + +enum FloatRegister { + XMM0 = 0, + XMM1 = 1, + XMM2 = 2, + XMM3 = 3, + XMM4 = 4, + XMM5 = 5, + XMM6 = 6, + XMM7 = 7, + XMM8 = 8, + XMM9 = 9, + XMM10 = 10, + XMM11 = 11, + XMM12 = 12, + XMM13 = 13, + XMM14 = 14, + XMM15 = 15, + kNumberOfFloatRegisters = 16 +}; + +} // namespace x86_64 +} // namespace whale + +#endif // WHALE_ASSEMBLER_REGISTERS_X86_64_H_ diff --git a/module/src/main/cpp/whale/src/base/align.h b/module/src/main/cpp/whale/src/base/align.h new file mode 100644 index 00000000..364cc6c8 --- /dev/null +++ b/module/src/main/cpp/whale/src/base/align.h @@ -0,0 +1,144 @@ +#ifndef WHALE_ASSEMBLER_ASSEMBLER_UTILS_H_ +#define WHALE_ASSEMBLER_ASSEMBLER_UTILS_H_ + + +#include +#include +#include +#include "base/cxx_helper.h" +#include "base/logging.h" +#include "base/macros.h" + + +// Use implicit_cast as a safe version of static_cast or const_cast +// for upcasting in the type hierarchy (i.e. casting a pointer to Foo +// to a pointer to SuperclassOfFoo or casting a pointer to Foo to +// a const pointer to Foo). +// When you use implicit_cast, the compiler checks that the cast is safe. +// Such explicit implicit_casts are necessary in surprisingly many +// situations where C++ demands an exact type match instead of an +// argument type convertible to a target type. +// +// The From type can be inferred, so the preferred syntax for using +// implicit_cast is the same as for static_cast etc.: +// +// implicit_cast(expr) +// +// implicit_cast would have been part of the C++ standard library, +// but the proposal was submitted too late. It will probably make +// its way into the language in the future. +template +inline To implicit_cast(From const &f) { + return f; +} + +// When you upcast (that is, cast a pointer from type Foo to type +// SuperclassOfFoo), it's fine to use implicit_cast<>, since upcasts +// always succeed. 
When you downcast (that is, cast a pointer from +// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because +// how do you know the pointer is really of type SubclassOfFoo? It +// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus, +// when you downcast, you should use this macro. In debug mode, we +// use dynamic_cast<> to double-check the downcast is legal (we die +// if it's not). In normal mode, we do the efficient static_cast<> +// instead. Thus, it's important to test in debug mode to make sure +// the cast is legal! +// This is the only place in the code we should use dynamic_cast<>. +// In particular, you SHOULDN'T be using dynamic_cast<> in order to +// do RTTI (eg code like this: +// if (dynamic_cast(foo)) HandleASubclass1Object(foo); +// if (dynamic_cast(foo)) HandleASubclass2Object(foo); +// You should design the code some other way not to need this. + +template +// use like this: down_cast(foo); +inline To down_cast(From *f) { // so we only accept pointers + static_assert(std::is_base_of::type>::value, + "down_cast unsafe as To is not a subtype of From"); + + return static_cast(f); +} + +template +// use like this: down_cast(foo); +inline To down_cast(From &f) { // so we only accept references + static_assert(std::is_base_of::type>::value, + "down_cast unsafe as To is not a subtype of From"); + + return static_cast(f); +} + +template +inline Dest bit_cast(const Source &source) { + // Compile time assertion: sizeof(Dest) == sizeof(Source) + // A compile error here means your Dest and Source have different sizes. 
+ static_assert(sizeof(Dest) == sizeof(Source), "sizes should be equal"); + Dest dest; + memcpy(&dest, &source, sizeof(dest)); + return dest; +} + +template +constexpr bool IsPowerOfTwo(T x) { + static_assert(std::is_integral::value, "T must be integral"); + return (x & (x - 1)) == 0; +} + +template +constexpr T RoundDown(T x, typename Identity::type n) { + DCHECK(IsPowerOfTwo(n)); + return (x & -n); +} + +template +constexpr T RoundUp(T x, typename std::remove_reference::type n) { + return RoundDown(x + n - 1, n); +} + + +template +inline T *AlignDown(T *x, uintptr_t n) { + return reinterpret_cast(RoundDown(reinterpret_cast(x), n)); +} + + +template +inline T *AlignUp(T *x, uintptr_t n) { + return reinterpret_cast(RoundUp(reinterpret_cast(x), n)); +} + +template +constexpr bool IsAligned(T x) { + static_assert((n & (n - 1)) == 0, "n is not a power of two"); + return (x & (n - 1)) == 0; +} + +template +inline bool IsAligned(T *x) { + return IsAligned(reinterpret_cast(x)); +} + +static inline size_t GetPageSize() { + return static_cast(sysconf(_SC_PAGE_SIZE)); +} + +inline intptr_t PageAlign(intptr_t x) { + return RoundUp(x, GetPageSize()); +} + +template +inline T PageAlign(T x) { + return RoundUp(x, GetPageSize()); +} + +inline intptr_t PageStart(intptr_t x) { + return ~(GetPageSize() - 1) & x; +} + +template +inline T *PageStart(T *x) { + return reinterpret_cast(PageStart(reinterpret_cast(x))); +} + + +#endif // WHALE_ASSEMBLER_ASSEMBLER_UTILS_H_ diff --git a/module/src/main/cpp/whale/src/base/array_ref.h b/module/src/main/cpp/whale/src/base/array_ref.h new file mode 100644 index 00000000..ad54c6f9 --- /dev/null +++ b/module/src/main/cpp/whale/src/base/array_ref.h @@ -0,0 +1,195 @@ +#ifndef WHALE_BASE_ARRAY_REF_H_ +#define WHALE_BASE_ARRAY_REF_H_ + +#include +#include +#include "base/logging.h" + +namespace whale { + + +/** + * @brief A container that references an array. 
+ * + * @details The template class ArrayRef provides a container that references + * an external array. This external array must remain alive while the ArrayRef + * object is in use. The external array may be a std::vector<>-backed storage + * or any other contiguous chunk of memory but that memory must remain valid, + * i.e. the std::vector<> must not be resized for example. + * + * Except for copy/assign and insert/erase/capacity functions, the interface + * is essentially the same as std::vector<>. Since we don't want to throw + * exceptions, at() is also excluded. + */ +template +class ArrayRef { + public: + using value_type = T; + using reference = T &; + using const_reference = const T &; + using pointer = T *; + using const_pointer = const T *; + using iterator = T *; + using const_iterator = const T *; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using difference_type = ptrdiff_t; + using size_type = size_t; + + // Constructors. + + constexpr ArrayRef() + : array_(nullptr), size_(0u) { + } + + template + explicit constexpr ArrayRef(T (&array)[size]) + : array_(array), size_((size_t) size) { + } + + template::value>::type> + explicit constexpr ArrayRef(U (&array)[size]) + : array_(array), size_((size_t) size) { + } + + constexpr ArrayRef(T *array_in, size_t size_in) + : array_(array_in), size_(size_in) { + } + + template::value>::type> + explicit ArrayRef(Vector &v) + : array_(v.data()), size_(v.size()) { + } + + template::type, + value_type>::value>::type> + explicit ArrayRef(const Vector &v) + : array_(v.data()), size_(v.size()) { + } + + ArrayRef(const ArrayRef &) = default; + + // Assignment operators. 
+ + ArrayRef &operator=(const ArrayRef &other) { + array_ = other.array_; + size_ = other.size_; + return *this; + } + + template + typename std::enable_if::value, ArrayRef>::type & + operator=(const ArrayRef &other) { + return *this = ArrayRef(other); + } + + template + static ArrayRef Cast(const ArrayRef &src) { + return ArrayRef(reinterpret_cast(src.data()), + src.size() * sizeof(T) / sizeof(U)); + } + + // Destructor. + ~ArrayRef() = default; + + // Iterators. + iterator begin() { return array_; } + + const_iterator begin() const { return array_; } + + const_iterator cbegin() const { return array_; } + + iterator end() { return array_ + size_; } + + const_iterator end() const { return array_ + size_; } + + const_iterator cend() const { return array_ + size_; } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + + const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } + + const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); } + + reverse_iterator rend() { return reverse_iterator(begin()); } + + const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } + + const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); } + + // Size. + size_type size() const { return size_; } + + bool empty() const { return size() == 0u; } + + // Element access. NOTE: Not providing at(). 
+ + reference operator[](size_type n) { + return array_[n]; + } + + const_reference operator[](size_type n) const { + return array_[n]; + } + + reference front() { + return array_[0]; + } + + const_reference front() const { + return array_[0]; + } + + reference back() { + return array_[size_ - 1u]; + } + + const_reference back() const { + return array_[size_ - 1u]; + } + + value_type *data() { return array_; } + + const value_type *data() const { return array_; } + + ArrayRef SubArray(size_type pos) { + return SubArray(pos, size() - pos); + } + + ArrayRef SubArray(size_type pos) const { + return SubArray(pos, size() - pos); + } + + ArrayRef SubArray(size_type pos, size_type length) { + return ArrayRef(data() + pos, length); + } + + ArrayRef SubArray(size_type pos, size_type length) const { + return ArrayRef(data() + pos, length); + } + + private: + T *array_; + size_t size_; +}; + +template +bool operator==(const ArrayRef &lhs, const ArrayRef &rhs) { + return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); +} + +template +bool operator!=(const ArrayRef &lhs, const ArrayRef &rhs) { + return !(lhs == rhs); +} + +} // namespace whale + +#endif // WHALE_BASE_ARRAY_REF_H_ diff --git a/module/src/main/cpp/whale/src/base/bit_utils.h b/module/src/main/cpp/whale/src/base/bit_utils.h new file mode 100644 index 00000000..7600ea4d --- /dev/null +++ b/module/src/main/cpp/whale/src/base/bit_utils.h @@ -0,0 +1,479 @@ +#ifndef WHALE_BASE_BIT_UTILS_H_ +#define WHALE_BASE_BIT_UTILS_H_ + +#include +#include +#include +#include +#include "base/cxx_helper.h" + +namespace whale { + +static constexpr size_t KB = 1024; +static constexpr size_t MB = KB * KB; +static constexpr size_t GB = KB * KB * KB; + +// Runtime sizes. +static constexpr size_t kBitsPerByte = 8; +static constexpr size_t kBitsPerByteLog2 = 3; +static constexpr int kBitsPerIntPtrT = sizeof(intptr_t) * kBitsPerByte; + +// Like sizeof, but count how many bits a type takes. Pass type explicitly. 
+template +constexpr size_t BitSizeOf() { + static_assert(std::is_integral::value, "T must be integral"); + using unsigned_type = typename std::make_unsigned::type; + static_assert(sizeof(T) == sizeof(unsigned_type), "Unexpected type size mismatch!"); + static_assert(std::numeric_limits::radix == 2, "Unexpected radix!"); + return std::numeric_limits::digits; +} + +// Like sizeof, but count how many bits a type takes. Infers type from parameter. +template +constexpr size_t BitSizeOf(T /*x*/) { + return BitSizeOf(); +} + +template +constexpr int CLZ(T x) { + static_assert(std::is_integral::value, "T must be integral"); + static_assert(std::is_unsigned::value, "T must be unsigned"); + static_assert(std::numeric_limits::radix == 2, "Unexpected radix!"); + static_assert(sizeof(T) == sizeof(uint64_t) || sizeof(T) <= sizeof(uint32_t), + "Unsupported sizeof(T)"); + constexpr bool is_64_bit = (sizeof(T) == sizeof(uint64_t)); + constexpr size_t adjustment = + is_64_bit ? 0u : std::numeric_limits::digits - std::numeric_limits::digits; + return is_64_bit ? __builtin_clzll(x) : __builtin_clz(x) - adjustment; +} + +// Similar to CLZ except that on zero input it returns bitwidth and supports signed integers. +template +constexpr int JAVASTYLE_CLZ(T x) { + static_assert(std::is_integral::value, "T must be integral"); + using unsigned_type = typename std::make_unsigned::type; + return (x == 0) ? BitSizeOf() : CLZ(static_cast(x)); +} + +template +constexpr int CTZ(T x) { + static_assert(std::is_integral::value, "T must be integral"); + // It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check + // that T is an unsigned type. + static_assert(sizeof(T) == sizeof(uint64_t) || sizeof(T) <= sizeof(uint32_t), + "Unsupported sizeof(T)"); + return (sizeof(T) == sizeof(uint64_t)) ? __builtin_ctzll(x) : __builtin_ctz(x); +} + +// Similar to CTZ except that on zero input it returns bitwidth and supports signed integers. 
+template +constexpr int JAVASTYLE_CTZ(T x) { + static_assert(std::is_integral::value, "T must be integral"); + using unsigned_type = typename std::make_unsigned::type; + return (x == 0) ? BitSizeOf() : CTZ(static_cast(x)); +} + +// Return the number of 1-bits in `x`. +template +constexpr int POPCOUNT(T x) { + return (sizeof(T) == sizeof(uint32_t)) ? __builtin_popcount(x) : __builtin_popcountll(x); +} + +// Swap bytes. +template +constexpr T BSWAP(T x) { + if (sizeof(T) == sizeof(uint16_t)) { + return __builtin_bswap16(x); + } else if (sizeof(T) == sizeof(uint32_t)) { + return __builtin_bswap32(x); + } else { + return __builtin_bswap64(x); + } +} + +// Find the bit position of the most significant bit (0-based), or -1 if there were no bits set. +template +constexpr ssize_t MostSignificantBit(T value) { + static_assert(std::is_integral::value, "T must be integral"); + static_assert(std::is_unsigned::value, "T must be unsigned"); + static_assert(std::numeric_limits::radix == 2, "Unexpected radix!"); + return (value == 0) ? -1 : std::numeric_limits::digits - 1 - CLZ(value); +} + +// Find the bit position of the least significant bit (0-based), or -1 if there were no bits set. +template +constexpr ssize_t LeastSignificantBit(T value) { + static_assert(std::is_integral::value, "T must be integral"); + static_assert(std::is_unsigned::value, "T must be unsigned"); + return (value == 0) ? -1 : CTZ(value); +} + +// How many bits (minimally) does it take to store the constant 'value'? i.e. 1 for 1, 3 for 5, etc. +template +constexpr size_t MinimumBitsToStore(T value) { + return static_cast(MostSignificantBit(value) + 1); +} + +template +constexpr T RoundUpToPowerOfTwo(T x) { + static_assert(std::is_integral::value, "T must be integral"); + static_assert(std::is_unsigned::value, "T must be unsigned"); + // NOTE: Undefined if x > (1 << (std::numeric_limits::digits - 1)). + return (x < 2u) ? 
x : static_cast(1u) << (std::numeric_limits::digits - CLZ(x - 1u)); +} + +// Return highest possible N - a power of two - such that val >= N. +template +constexpr T TruncToPowerOfTwo(T val) { + static_assert(std::is_integral::value, "T must be integral"); + static_assert(std::is_unsigned::value, "T must be unsigned"); + return (val != 0) ? static_cast(1u) << (BitSizeOf() - CLZ(val) - 1u) : 0; +} + +template +constexpr bool IsPowerOfTwo(T x) { + static_assert(std::is_integral::value, "T must be integral"); + // TODO: assert unsigned. There is currently many uses with signed values. + return (x & (x - 1)) == 0; +} + +template +constexpr int WhichPowerOf2(T x) { + static_assert(std::is_integral::value, "T must be integral"); + return CTZ(x); +} + +// For rounding integers. +// Note: Omit the `n` from T type deduction, deduce only from the `x` argument. +template +constexpr T RoundDown(T x, typename Identity::type n); + +template +constexpr T RoundDown(T x, typename Identity::type n) { + return (x & -n); +} + +template +constexpr T RoundUp(T x, typename std::remove_reference::type n); + +template +constexpr T RoundUp(T x, typename std::remove_reference::type n) { + return RoundDown(x + n - 1, n); +} + +// For aligning pointers. 
+template +inline T *AlignDown(T *x, uintptr_t n); + +template +inline T *AlignDown(T *x, uintptr_t n) { + return reinterpret_cast(RoundDown(reinterpret_cast(x), n)); +} + +template +inline T *AlignUp(T *x, uintptr_t n); + +template +inline T *AlignUp(T *x, uintptr_t n) { + return reinterpret_cast(RoundUp(reinterpret_cast(x), n)); +} + +template +constexpr bool IsAligned(T x) { + static_assert((n & (n - 1)) == 0, "n is not a power of two"); + return (x & (n - 1)) == 0; +} + +template +inline bool IsAligned(T *x) { + return IsAligned(reinterpret_cast(x)); +} + +template +inline bool IsAlignedParam(T x, int n) { + return (x & (n - 1)) == 0; +} + +template +inline bool IsAlignedParam(T *x, int n) { + return IsAlignedParam(reinterpret_cast(x), n); +} + +#define CHECK_ALIGNED(value, alignment) \ + CHECK(::art::IsAligned(value)) << reinterpret_cast(value) + +#define DCHECK_ALIGNED(value, alignment) \ + DCHECK(::art::IsAligned(value)) << reinterpret_cast(value) + +#define CHECK_ALIGNED_PARAM(value, alignment) \ + CHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast(value) + +#define DCHECK_ALIGNED_PARAM(value, alignment) \ + DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast(value) + +inline uint16_t Low16Bits(uint32_t value) { + return static_cast(value); +} + +inline uint16_t High16Bits(uint32_t value) { + return static_cast(value >> 16); +} + +inline uint32_t Low32Bits(uint64_t value) { + return static_cast(value); +} + +inline uint32_t High32Bits(uint64_t value) { + return static_cast(value >> 32); +} + +// Check whether an N-bit two's-complement representation can hold value. 
+template +inline bool IsInt(size_t N, T value) { + if (N == BitSizeOf()) { + return true; + } else { + T limit = static_cast(1) << (N - 1u); + return (-limit <= value) && (value < limit); + } +} + +template +constexpr T GetIntLimit(size_t bits) { + return static_cast(1) << (bits - 1); +} + +template +constexpr bool IsInt(T value) { + static_assert(kBits > 0, "kBits cannot be zero."); + static_assert(kBits <= BitSizeOf(), "kBits must be <= max."); + static_assert(std::is_signed::value, "Needs a signed type."); + // Corner case for "use all bits." Can't use the limits, as they would overflow, but it is + // trivially true. + return (kBits == BitSizeOf()) ? + true : + (-GetIntLimit(kBits) <= value) && (value < GetIntLimit(kBits)); +} + +template +constexpr bool IsUint(T value) { + static_assert(kBits > 0, "kBits cannot be zero."); + static_assert(kBits <= BitSizeOf(), "kBits must be <= max."); + static_assert(std::is_integral::value, "Needs an integral type."); + // Corner case for "use all bits." Can't use the limits, as they would overflow, but it is + // trivially true. + // NOTE: To avoid triggering assertion in GetIntLimit(kBits+1) if kBits+1==BitSizeOf(), + // use GetIntLimit(kBits)*2u. The unsigned arithmetic works well for us if it overflows. + using unsigned_type = typename std::make_unsigned::type; + return (0 <= value) && + (kBits == BitSizeOf() || + (static_cast(value) <= GetIntLimit(kBits) * 2u - 1u)); +} + +template +constexpr bool IsAbsoluteUint(T value) { + static_assert(kBits <= BitSizeOf(), "kBits must be <= max."); + static_assert(std::is_integral::value, "Needs an integral type."); + using unsigned_type = typename std::make_unsigned::type; + return (kBits == BitSizeOf()) + ? true + : IsUint(value < 0 + ? static_cast(-1 - value) + 1u // Avoid overflow. 
+ : static_cast(value)); +} + +// Generate maximum/minimum values for signed/unsigned n-bit integers +template +constexpr T MaxInt(size_t bits) { + using unsigned_type = typename std::make_unsigned::type; + return bits == BitSizeOf() + ? std::numeric_limits::max() + : std::is_signed::value + ? ((bits == 1u) ? 0 : static_cast(MaxInt(bits - 1))) + : static_cast(UINT64_C(1) << bits) - static_cast(1); +} + +template +constexpr T MinInt(size_t bits) { + return bits == BitSizeOf() + ? std::numeric_limits::min() + : std::is_signed::value + ? ((bits == 1u) ? -1 : static_cast(-1) - MaxInt(bits)) + : static_cast(0); +} + +// Returns value with bit set in lowest one-bit position or 0 if 0. (java.lang.X.lowestOneBit). +template +inline static kind LowestOneBitValue(kind opnd) { + // Hacker's Delight, Section 2-1 + return opnd & -opnd; +} + +// Returns value with bit set in hightest one-bit position or 0 if 0. (java.lang.X.highestOneBit). +template +inline static T HighestOneBitValue(T opnd) { + using unsigned_type = typename std::make_unsigned::type; + T res; + if (opnd == 0) { + res = 0; + } else { + int bit_position = BitSizeOf() - (CLZ(static_cast(opnd)) + 1); + res = static_cast(UINT64_C(1) << bit_position); + } + return res; +} + +// Rotate bits. +template +inline static T Rot(T opnd, int distance) { + int mask = BitSizeOf() - 1; + int unsigned_right_shift = left ? (-distance & mask) : (distance & mask); + int signed_left_shift = left ? 
(distance & mask) : (-distance & mask); + using unsigned_type = typename std::make_unsigned::type; + return (static_cast(opnd) >> unsigned_right_shift) | (opnd << signed_left_shift); +} + +// TUNING: use rbit for arm/arm64 +inline static uint32_t ReverseBits32(uint32_t opnd) { + // Hacker's Delight 7-1 + opnd = ((opnd >> 1) & 0x55555555) | ((opnd & 0x55555555) << 1); + opnd = ((opnd >> 2) & 0x33333333) | ((opnd & 0x33333333) << 2); + opnd = ((opnd >> 4) & 0x0F0F0F0F) | ((opnd & 0x0F0F0F0F) << 4); + opnd = ((opnd >> 8) & 0x00FF00FF) | ((opnd & 0x00FF00FF) << 8); + opnd = ((opnd >> 16)) | ((opnd) << 16); + return opnd; +} + +// TUNING: use rbit for arm/arm64 +inline static uint64_t ReverseBits64(uint64_t opnd) { + // Hacker's Delight 7-1 + opnd = (opnd & 0x5555555555555555L) << 1 | ((opnd >> 1) & 0x5555555555555555L); + opnd = (opnd & 0x3333333333333333L) << 2 | ((opnd >> 2) & 0x3333333333333333L); + opnd = (opnd & 0x0f0f0f0f0f0f0f0fL) << 4 | ((opnd >> 4) & 0x0f0f0f0f0f0f0f0fL); + opnd = (opnd & 0x00ff00ff00ff00ffL) << 8 | ((opnd >> 8) & 0x00ff00ff00ff00ffL); + opnd = (opnd << 48) | ((opnd & 0xffff0000L) << 16) | ((opnd >> 16) & 0xffff0000L) | + (opnd >> 48); + return opnd; +} + +// Create a mask for the least significant "bits" +// The returned value is always unsigned to prevent undefined behavior for bitwise ops. +// +// Given 'bits', +// Returns: +// <--- bits ---> +// +-----------------+------------+ +// | 0 ............0 | 1.....1 | +// +-----------------+------------+ +// msb lsb +template +inline static constexpr std::make_unsigned_t MaskLeastSignificant(size_t bits) { + using unsigned_T = std::make_unsigned_t; + if (bits >= BitSizeOf()) { + return std::numeric_limits::max(); + } else { + auto kOne = static_cast(1); // Do not truncate for T>size_t. + return static_cast((kOne << bits) - kOne); + } +} + +// Clears the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'. +// (Equivalent of ARM BFC instruction). 
+// +// Given: +// <-- width --> +// +--------+------------+--------+ +// | ABC... | bitfield | XYZ... + +// +--------+------------+--------+ +// lsb 0 +// Returns: +// <-- width --> +// +--------+------------+--------+ +// | ABC... | 0........0 | XYZ... + +// +--------+------------+--------+ +// lsb 0 +template +inline static constexpr T BitFieldClear(T value, size_t lsb, size_t width) { + const auto val = static_cast>(value); + const auto mask = MaskLeastSignificant(width); + + return static_cast(val & ~(mask << lsb)); +} + +// Inserts the contents of 'data' into bitfield of 'value' starting +// at the least significant bit "lsb" with a bitwidth of 'width'. +// Note: data must be within range of [MinInt(width), MaxInt(width)]. +// (Equivalent of ARM BFI instruction). +// +// Given (data): +// <-- width --> +// +--------+------------+--------+ +// | ABC... | bitfield | XYZ... + +// +--------+------------+--------+ +// lsb 0 +// Returns: +// <-- width --> +// +--------+------------+--------+ +// | ABC... | 0...data | XYZ... + +// +--------+------------+--------+ +// lsb 0 + +template +inline static constexpr T BitFieldInsert(T value, T2 data, size_t lsb, size_t width) { + const auto data_mask = MaskLeastSignificant(width); + const auto value_cleared = BitFieldClear(value, lsb, width); + + return static_cast(value_cleared | ((data & data_mask) << lsb)); +} + +// Extracts the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'. +// Signed types are sign-extended during extraction. (Equivalent of ARM UBFX/SBFX instruction). +// +// Given: +// <-- width --> +// +--------+-------------+-------+ +// | | bitfield | + +// +--------+-------------+-------+ +// lsb 0 +// (Unsigned) Returns: +// <-- width --> +// +----------------+-------------+ +// | 0... 0 | bitfield | +// +----------------+-------------+ +// 0 +// (Signed) Returns: +// <-- width --> +// +----------------+-------------+ +// | S... 
S | bitfield | +// +----------------+-------------+ +// 0 +// where S is the highest bit in 'bitfield'. +template +inline static constexpr T BitFieldExtract(T value, size_t lsb, size_t width) { + const auto val = static_cast>(value); + + const auto bitfield_unsigned = + static_cast((val >> lsb) & MaskLeastSignificant(width)); + if (std::is_signed::value) { + // Perform sign extension + if (width == 0) { // Avoid underflow. + return static_cast(0); + } else if (bitfield_unsigned & (1 << (width - 1))) { // Detect if sign bit was set. + // MSB LSB + // 0b11111...100...000000 + const auto ones_negmask = ~MaskLeastSignificant(width); + return static_cast(bitfield_unsigned | ones_negmask); + } + } + // Skip sign extension. + return bitfield_unsigned; +} + +inline static constexpr size_t BitsToBytesRoundUp(size_t num_bits) { + return RoundUp(num_bits, kBitsPerByte) / kBitsPerByte; +} + +} // namespace whale + +#endif // WHALE_BASE_BIT_UTILS_H_ + diff --git a/module/src/main/cpp/whale/src/base/cxx_helper.h b/module/src/main/cpp/whale/src/base/cxx_helper.h new file mode 100644 index 00000000..5835a79b --- /dev/null +++ b/module/src/main/cpp/whale/src/base/cxx_helper.h @@ -0,0 +1,53 @@ +#ifndef WHALE_BASE_CXX_HELPER_H_ +#define WHALE_BASE_CXX_HELPER_H_ + +#include +#include "base/primitive_types.h" + +template +U ForceCast(T *x) { + return (U) (uintptr_t) x; +} + +template +U ForceCast(T &x) { + return *(U *) &x; +} + +template +struct Identity { + using type = T; +}; + +template +static inline R OffsetOf(uintptr_t ptr, size_t offset) { + return reinterpret_cast(ptr + offset); +} + +template +static inline R OffsetOf(intptr_t ptr, size_t offset) { + return reinterpret_cast(ptr + offset); +} + +template +static inline R OffsetOf(ptr_t ptr, size_t offset) { + return (R) (reinterpret_cast(ptr) + offset); +} + +template +static inline T MemberOf(ptr_t ptr, size_t offset) { + return *OffsetOf(ptr, offset); +} + +static inline size_t DistanceOf(ptr_t a, ptr_t b) { + return 
static_cast( + abs(reinterpret_cast(b) - reinterpret_cast(a)) + ); +} + +template +static inline void AssignOffset(ptr_t ptr, size_t offset, T member) { + *OffsetOf(ptr, offset) = member; +} + +#endif // WHALE_BASE_CXX_HELPER_H_ diff --git a/module/src/main/cpp/whale/src/base/enums.h b/module/src/main/cpp/whale/src/base/enums.h new file mode 100644 index 00000000..eeac3c3d --- /dev/null +++ b/module/src/main/cpp/whale/src/base/enums.h @@ -0,0 +1,32 @@ +#ifndef WHALE_BASE_ENUMS_H_ +#define WHALE_BASE_ENUMS_H_ + + +#include +#include + +enum class PointerSize : size_t { + k32 = 4, + k64 = 8 +}; + +inline std::ostream &operator<<(std::ostream &os, const PointerSize &rhs) { + switch (rhs) { + case PointerSize::k32: + os << "k32"; + break; + case PointerSize::k64: + os << "k64"; + break; + default: + os << "PointerSize[" << static_cast(rhs) << "]"; + break; + } + return os; +} + +static constexpr PointerSize kRuntimePointerSize = sizeof(void *) == 8U + ? PointerSize::k64 + : PointerSize::k32; + +#endif // WHALE_BASE_ENUMS_H_ diff --git a/module/src/main/cpp/whale/src/base/logging.h b/module/src/main/cpp/whale/src/base/logging.h new file mode 100644 index 00000000..3b9bfe78 --- /dev/null +++ b/module/src/main/cpp/whale/src/base/logging.h @@ -0,0 +1,222 @@ +#ifndef _WHALE_BASE_LOGGING_H_ +#define _WHALE_BASE_LOGGING_H_ + +#include +#include + +#include "base/macros.h" + +#ifdef __ANDROID__ +#include +#endif + + +#define CHECK(x) \ + if (UNLIKELY(!(x))) /* NOLINT*/ \ + whale::LogMessageFatal(__FILE__, __LINE__).stream() \ + << "Check failed: " #x << " " + +#define CHECK_OP(LHS, RHS, OP) \ + for (auto _values = whale::MakeEagerEvaluator(LHS, RHS); \ + UNLIKELY(!(_values.lhs OP _values.rhs)); /* empty */) \ + whale::LogMessage(__FILE__, __LINE__).stream() \ + << "Check failed: " << #LHS << " " << #OP << " " << #RHS \ + << " (" #LHS "=" << _values.lhs << ", " #RHS "=" << _values.rhs << ") " + +#define CHECK_EQ(x, y) CHECK_OP(x, y, ==) +#define CHECK_NE(x, y) CHECK_OP(x, y, 
!=) +#define CHECK_LE(x, y) CHECK_OP(x, y, <=) +#define CHECK_LT(x, y) CHECK_OP(x, y, <) +#define CHECK_GE(x, y) CHECK_OP(x, y, >=) +#define CHECK_GT(x, y) CHECK_OP(x, y, >) + +#define CHECK_STROP(s1, s2, sense) \ + if (UNLIKELY((strcmp(s1, s2) == 0) != sense)) \ + LOG(FATAL) << "Check failed: " \ + << "\"" << s1 << "\"" \ + << (sense ? " == " : " != ") \ + << "\"" << s2 << "\"" + +#define CHECK_STREQ(s1, s2) CHECK_STROP(s1, s2, true) +#define CHECK_STRNE(s1, s2) CHECK_STROP(s1, s2, false) + +#define CHECK_CONSTEXPR(x, out, dummy) \ + (UNLIKELY(!(x))) ? (LOG(FATAL) << "Check failed: " << #x out, dummy) : + +#ifndef NDEBUG + +#define DCHECK(x) CHECK(x) +#define DCHECK_EQ(x, y) CHECK_EQ(x, y) +#define DCHECK_NE(x, y) CHECK_NE(x, y) +#define DCHECK_LE(x, y) CHECK_LE(x, y) +#define DCHECK_LT(x, y) CHECK_LT(x, y) +#define DCHECK_GE(x, y) CHECK_GE(x, y) +#define DCHECK_GT(x, y) CHECK_GT(x, y) +#define DCHECK_STREQ(s1, s2) CHECK_STREQ(s1, s2) +#define DCHECK_STRNE(s1, s2) CHECK_STRNE(s1, s2) +#define DCHECK_CONSTEXPR(x, out, dummy) CHECK_CONSTEXPR(x, out, dummy) + +#else // NDEBUG + +#define DCHECK(condition) \ + while (false) \ + CHECK(condition) + +#define DCHECK_EQ(val1, val2) \ + while (false) \ + CHECK_EQ(val1, val2) + +#define DCHECK_NE(val1, val2) \ + while (false) \ + CHECK_NE(val1, val2) + +#define DCHECK_LE(val1, val2) \ + while (false) \ + CHECK_LE(val1, val2) + +#define DCHECK_LT(val1, val2) \ + while (false) \ + CHECK_LT(val1, val2) + +#define DCHECK_GE(val1, val2) \ + while (false) \ + CHECK_GE(val1, val2) + +#define DCHECK_GT(val1, val2) \ + while (false) \ + CHECK_GT(val1, val2) + +#define DCHECK_STREQ(str1, str2) \ + while (false) \ + CHECK_STREQ(str1, str2) + +#define DCHECK_STRNE(str1, str2) \ + while (false) \ + CHECK_STRNE(str1, str2) + +#define DCHECK_CONSTEXPR(x, out, dummy) \ + (false && (x)) ? 
(dummy) : + +#endif + + +#define LOG_INFO whale::LogMessage(__FILE__, __LINE__) +#define LOG_WARNING whale::LogMessage(__FILE__, __LINE__) +#define LOG_ERROR whale::LogMessage(__FILE__, __LINE__) +#define LOG_FATAL whale::LogMessageFatal(__FILE__, __LINE__) +#define LOG_QFATAL LOG_FATAL + +#ifdef NDEBUG +#define LOG_DFATAL LOG_ERROR +#else +#define LOG_DFATAL LOG_FATAL +#endif + +#define LOG(severity) LOG_ ## severity.stream() + +#define VLOG(x) if ((x) > 0) {} else LOG_INFO.stream() // NOLINT + +namespace whale { + +template +struct EagerEvaluator { + EagerEvaluator(LHS lhs, RHS rhs) : lhs(lhs), rhs(rhs) {} + + LHS lhs; + RHS rhs; +}; + +template +EagerEvaluator MakeEagerEvaluator(LHS lhs, RHS rhs) { + return EagerEvaluator(lhs, rhs); +} + +#define EAGER_PTR_EVALUATOR(T1, T2) \ + template <> struct EagerEvaluator { \ + EagerEvaluator(T1 lhs, T2 rhs) \ + : lhs(reinterpret_cast(lhs)), \ + rhs(reinterpret_cast(rhs)) { } \ + const void* lhs; \ + const void* rhs; \ + } + +EAGER_PTR_EVALUATOR(const char*, const char*); + +EAGER_PTR_EVALUATOR(const char*, char*); + +EAGER_PTR_EVALUATOR(char*, const char*); + +EAGER_PTR_EVALUATOR(char*, char*); + +EAGER_PTR_EVALUATOR(const unsigned char*, const unsigned char*); + +EAGER_PTR_EVALUATOR(const unsigned char*, unsigned char*); + +EAGER_PTR_EVALUATOR(unsigned char*, const unsigned char*); + +EAGER_PTR_EVALUATOR(unsigned char*, unsigned char*); + +EAGER_PTR_EVALUATOR(const signed char*, const signed char*); + +EAGER_PTR_EVALUATOR(const signed char*, signed char*); + +EAGER_PTR_EVALUATOR(signed char*, const signed char*); + +EAGER_PTR_EVALUATOR(signed char*, signed char*); + + +class LogMessage { + + public: + LogMessage(const char *file, int line) + : flushed_(false) { + } + + LogMessage(const LogMessage &) = delete; + + LogMessage &operator=(const LogMessage &) = delete; + + void Flush() { + std::string s = str_.str(); + size_t n = s.size(); +#ifdef __ANDROID__ + __android_log_write(ANDROID_LOG_ERROR, "Whale", s.c_str()); 
+#else + fwrite(s.data(), 1, n, stderr); +#endif + flushed_ = true; + } + + virtual ~LogMessage() { + if (!flushed_) { + Flush(); + } + } + + std::ostream &stream() { return str_; } + + private: + bool flushed_; + std::ostringstream str_; +}; + + +class LogMessageFatal : public LogMessage { + + public: + LogMessageFatal(const char *file, int line) + : LogMessage(file, line) {} + + LogMessageFatal(const LogMessageFatal &) = delete; + + LogMessageFatal &operator=(const LogMessageFatal &) = delete; + + NO_RETURN ~LogMessageFatal() override { + Flush(); + abort(); + } +}; + +} // namespace whale + +#endif // _WHALE_BASE_LOGGING_H_ diff --git a/module/src/main/cpp/whale/src/base/macros.h b/module/src/main/cpp/whale/src/base/macros.h new file mode 100644 index 00000000..d8354f70 --- /dev/null +++ b/module/src/main/cpp/whale/src/base/macros.h @@ -0,0 +1,51 @@ +#ifndef WHALE_BASE_MACROS_H_ +#define WHALE_BASE_MACROS_H_ + +#define DISALLOW_ALLOCATION() \ + public: \ + NO_RETURN ALWAYS_INLINE void operator delete(void*, size_t) { UNREACHABLE(); } \ + ALWAYS_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \ + ALWAYS_INLINE void operator delete(void*, void*) noexcept { } \ + private: \ + void* operator new(size_t) = delete // NOLINT + +#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ +private: \ +TypeName(); \ +DISALLOW_COPY_AND_ASSIGN(TypeName) + +#define ALIGNED(x) __attribute__ ((__aligned__(x))) +#define PACKED(x) __attribute__ ((__aligned__(x), __packed__)) + +#define OFFSETOF_MEMBER(t, f) offsetof(t, f) + +// Stringify the argument. +#define QUOTE(x) #x +#define STRINGIFY(x) QUOTE(x) + +#ifndef NDEBUG +#define ALWAYS_INLINE +#else +#define ALWAYS_INLINE __attribute__ ((always_inline)) +#endif + +// Define that a position within code is unreachable, for example: +// int foo () { LOG(FATAL) << "Don't call me"; UNREACHABLE(); } +// without the UNREACHABLE a return statement would be necessary. 
+#define UNREACHABLE __builtin_unreachable + +#define LIKELY(x) __builtin_expect(!!(x), 1) +#define UNLIKELY(x) __builtin_expect(!!(x), 0) + +#define NO_RETURN [[ noreturn ]] + +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) + +#define OPTION __unused + +#define OPEN_API __attribute__((visibility("default"))) +#define C_API extern "C" + +#endif // WHALE_BASE_MACROS_H_ diff --git a/module/src/main/cpp/whale/src/base/offsets.h b/module/src/main/cpp/whale/src/base/offsets.h new file mode 100644 index 00000000..08db6645 --- /dev/null +++ b/module/src/main/cpp/whale/src/base/offsets.h @@ -0,0 +1,60 @@ +#ifndef WHALE_BASE_OFFSETS_H_ +#define WHALE_BASE_OFFSETS_H_ + +#include +#include +#include "base/enums.h" + +namespace whale { + + +// Allow the meaning of offsets to be strongly typed. +class Offset { + public: + constexpr explicit Offset(size_t val) : val_(val) {} + + constexpr int32_t Int32Value() const { + return static_cast(val_); + } + + constexpr uint32_t Uint32Value() const { + return static_cast(val_); + } + + constexpr size_t SizeValue() const { + return val_; + } + + protected: + size_t val_; +}; + +// Offsets relative to the current frame. +class FrameOffset : public Offset { + public: + constexpr explicit FrameOffset(size_t val) : Offset(val) {} + + bool operator>(FrameOffset other) const { return val_ > other.val_; } + + bool operator<(FrameOffset other) const { return val_ < other.val_; } +}; + +// Offsets relative to the current running thread. +template +class ThreadOffset : public Offset { + public: + constexpr explicit ThreadOffset(size_t val) : Offset(val) {} +}; + +using ThreadOffset32 = ThreadOffset; +using ThreadOffset64 = ThreadOffset; + +// Offsets relative to an object. 
+class MemberOffset : public Offset { + public: + constexpr explicit MemberOffset(size_t val) : Offset(val) {} +}; + +} + +#endif // WHALE_BASE_OFFSETS_H_ diff --git a/module/src/main/cpp/whale/src/base/primitive_types.h b/module/src/main/cpp/whale/src/base/primitive_types.h new file mode 100644 index 00000000..8199bd62 --- /dev/null +++ b/module/src/main/cpp/whale/src/base/primitive_types.h @@ -0,0 +1,22 @@ +#ifndef WHALE_BASE_PRIMITIVE_TYPES_H_ +#define WHALE_BASE_PRIMITIVE_TYPES_H_ + +#include +#include + +typedef uint8_t byte; +typedef uint8_t u1; +typedef uint16_t u2; +typedef uint32_t u4; +typedef uint64_t u8; + + +typedef int8_t s1; +typedef int16_t s2; +typedef int32_t s4; +typedef int64_t s8; + +typedef size_t offset_t; +typedef void* ptr_t; + +#endif // WHALE_BASE_PRIMITIVE_TYPES_H_ diff --git a/module/src/main/cpp/whale/src/base/singleton.h b/module/src/main/cpp/whale/src/base/singleton.h new file mode 100644 index 00000000..3beb67e0 --- /dev/null +++ b/module/src/main/cpp/whale/src/base/singleton.h @@ -0,0 +1,32 @@ +#ifndef WHALE_BASE_SINGLETON_H_ +#define WHALE_BASE_SINGLETON_H_ + +template +class Singleton { + public: + Singleton(std::function init_function) : init_function_(init_function), + initialized_(false) {} + + void Ensure() { + if (!initialized_) { + std::lock_guard guard(lock_); + if (!initialized_) { + init_function_(&instance_); + initialized_ = true; + } + } + } + + T Get() { + Ensure(); + return instance_; + } + + private: + typename std::conditional::value, bool, T>::type instance_; + std::mutex lock_; + std::function init_function_; + bool initialized_; +}; + +#endif // WHALE_BASE_SINGLETON_H_ diff --git a/module/src/main/cpp/whale/src/base/stringprintf.h b/module/src/main/cpp/whale/src/base/stringprintf.h new file mode 100644 index 00000000..276f75dc --- /dev/null +++ b/module/src/main/cpp/whale/src/base/stringprintf.h @@ -0,0 +1,22 @@ +#ifndef WHALE_BASE_STRINGPRINTF_H_ +#define WHALE_BASE_STRINGPRINTF_H_ + +#include +#include + 
+namespace whale { + +// Returns a string corresponding to printf-like formatting of the arguments. +std::string StringPrintf(const char *fmt, ...) +__attribute__((__format__(__printf__, 1, 2))); + +// Appends a printf-like formatting of the arguments to 'dst'. +void StringAppendF(std::string *dst, const char *fmt, ...) +__attribute__((__format__(__printf__, 2, 3))); + +// Appends a printf-like formatting of the arguments to 'dst'. +void StringAppendV(std::string *dst, const char *format, va_list ap); + +} // namespace art + +#endif // WHALE_BASE_STRINGPRINTF_H_ diff --git a/module/src/main/cpp/whale/src/dbi/arm/decoder_arm.cc b/module/src/main/cpp/whale/src/dbi/arm/decoder_arm.cc new file mode 100644 index 00000000..67593821 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/decoder_arm.cc @@ -0,0 +1,21 @@ +#include "dbi/arm/decoder_arm.h" + +#define CASE(mask, val, type) \ +if ((((insn) & (mask)) == val)) { \ + return type; \ +} + +namespace whale { +namespace arm { + +ArmInsnType DecodeArm(u4 insn) { + CASE(0xe5f0000, 0x41f0000, kARM_LDR); + CASE(0xfef0010, 0x8f0000, kARM_ADD); + CASE(0xdef0000, 0x1a00000, kARM_MOV); + CASE(0xf000000, 0xa000000, kARM_B); + CASE(0xf000000, 0xb000000, kARM_BL); + return kARM_UNHANDLED; +} + +} // namespace arm +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/arm/decoder_arm.h b/module/src/main/cpp/whale/src/dbi/arm/decoder_arm.h new file mode 100644 index 00000000..2c3ea50b --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/decoder_arm.h @@ -0,0 +1,32 @@ +#ifndef ARCH_ARM_DECODER_ARM_H_ +#define ARCH_ARM_DECODER_ARM_H_ + +#include "base/primitive_types.h" + +namespace whale { +namespace arm { + +enum ArmInsnType { + kARM_LDR, + kARM_ADD, + kARM_MOV, + kARM_B, + kARM_BL, + kARM_VFP_VSTM_DP, + kARM_VFP_VSTM_SP, + kARM_VFP_VLDM_SP, + kARM_VFP_VLDM_DP, + kARM_VFP_VSTR_DP, + kARM_VFP_VSTR_SP, + kARM_VFP_VLDR_DP, + kARM_VFP_VLDR_SP, + + kARM_UNHANDLED, +}; + +ArmInsnType DecodeArm(u4 insn); + +} // 
namespace arm +} // namespace whale + +#endif // ARCH_ARM_DECODER_ARM_H_ diff --git a/module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.cc b/module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.cc new file mode 100644 index 00000000..b7d8486d --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.cc @@ -0,0 +1,41 @@ +#include "dbi/arm/decoder_thumb.h" + +#define CASE(mask, val, type) \ +if ((((insn) & (mask)) == val)) { \ + return type; \ +} + +namespace whale { +namespace arm { + + +ThumbInsnType DecodeThumb16(u2 insn) { + CASE(0xf800, 0xa000, kTHUMB_ADD_FROM_PC16); + CASE(0xff00, 0x4400, kTHUMB_ADDH16); + CASE(0xf800, 0xe000, kTHUMB_B16); + CASE(0xf000, 0xd000, kTHUMB_B_COND16); + CASE(0xff87, 0x4780, kTHUMB_BLX16); + CASE(0xff87, 0x4700, kTHUMB_BX16); + CASE(0xfd00, 0xb900, kTHUMB_CBNZ16); + CASE(0xfd00, 0xb100, kTHUMB_CBZ16); + CASE(0xff00, 0x4500, kTHUMB_CMPH16); + CASE(0xf800, 0x4800, kTHUMB_LDR_PC_16); + CASE(0xff00, 0x4600, kTHUMB_MOVH16); + return kTHUMB_UNHANDLED16; +} + +ThumbInsnType DecodeThumb32(u4 insn) { + CASE(0xf800d000, 0xf0009000, kTHUMB_B32); + CASE(0xf800d000, 0xf000d000, kTHUMB_BL32); + CASE(0xf800d000, 0xf000c000, kTHUMB_BL_ARM32); + CASE(0xff7f0000, 0xf81f0000, kTHUMB_LDRBL32); + CASE(0xff7f0000, 0xf83f0000, kTHUMB_LDRHL32); + CASE(0xff7f0000, 0xf85f0000, kTHUMB_LDRL32); + CASE(0xfff00fc0, 0xf9100000, kTHUMB_LDRSBL32); + CASE(0xff7f0000, 0xf93f0000, kTHUMB_LDRSHL32); + return kTHUMB_UNHANDLED32; +} + + +} // namespace arm +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.h b/module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.h new file mode 100644 index 00000000..8daee5a4 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/decoder_thumb.h @@ -0,0 +1,51 @@ +#ifndef ARCH_ARM_DECODER_THUMB_H_ +#define ARCH_ARM_DECODER_THUMB_H_ + +#include "base/primitive_types.h" + +namespace whale { +namespace arm { + +enum ThumbInsnType { + kTHUMB_ADD_FROM_PC16, + kTHUMB_ADDH16, + kTHUMB_B16, + 
kTHUMB_B_COND16, + kTHUMB_BLX16, + kTHUMB_BX16, + kTHUMB_CBNZ16, + kTHUMB_CBZ16, + kTHUMB_CMPH16, + kTHUMB_LDR_PC_16, + kTHUMB_MOVH16, + kTHUMB_UNHANDLED16, + + kTHUMB_B32, + kTHUMB_BL32, + kTHUMB_BL_ARM32, + kTHUMB_LDRBL32, + kTHUMB_LDRHL32, + kTHUMB_LDRL32, + kTHUMB_LDRSBL32, + kTHUMB_LDRSHL32, + kTHUMB_UNHANDLED32 +}; + +ThumbInsnType DecodeThumb16(u2 insn); + +ThumbInsnType DecodeThumb32(u4 insn); + +/** + * Bit[15:11] in Thumb32 : + * 0b11101 + * 0b11110 + * 0b11111 + */ +static inline bool Is32BitThumbInstruction(u2 insn) { + return ((insn & 0xF000) == 0xF000) || ((insn & 0xF800) == 0xE800); +} + +} // namespace arm +} // namespace whale + +#endif // ARCH_ARM_DECODER_THUMB_H_ diff --git a/module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.cc b/module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.cc new file mode 100644 index 00000000..8e6d75a5 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.cc @@ -0,0 +1,112 @@ +#include + +#include "platform/memory.h" +#include "assembler/vixl/aarch32/macro-assembler-aarch32.h" +#include "dbi/arm/inline_hook_arm.h" +#include "dbi/arm/instruction_rewriter_arm.h" +#include "base/logging.h" +#include "base/primitive_types.h" + +#ifdef WHALE_DISASM_AFTER_REWRITE +#include "assembler/vixl/aarch32/disasm-aarch32.h" +#endif + +#define __ masm. + +namespace whale { +namespace arm { + +using namespace vixl::aarch32; // NOLINT + +#ifdef WHALE_DISASM_AFTER_REWRITE + +void Disassemble(MacroAssembler *masm) { + whale::LogMessage log("Disassembler", static_cast(masm->GetSizeOfCodeGenerated())); + vixl::aarch32::PrintDisassembler disassembler(log.stream()); + disassembler.DisassembleA32Buffer(masm->GetBuffer()->GetStartAddress(), + masm->GetBuffer()->GetSizeInBytes()); +} + +#endif + + +void ArmInlineHook::StartHook() { + DCHECK(address_ != 0 && replace_ != 0); + MacroAssembler masm; + is_thumb_ ? 
masm.UseT32() : masm.UseA32(); + Literal replace(static_cast(replace_)); + if (is_thumb_) { + if (address_ % 4 != 0) { + __ Nop(); + } + } + __ Ldr(pc, &replace); + __ Place(&replace); + masm.FinalizeCode(); + + size_t backup_size = masm.GetSizeOfCodeGenerated(); + u2 *target = GetTarget(); + if (is_thumb_) { + if (!Is32BitThumbInstruction(target[backup_size / sizeof(u2) - 2]) + && Is32BitThumbInstruction(target[backup_size / sizeof(u2) - 1])) { + backup_size += sizeof(u2); + } + } + backup_code_ = new BackupCode(GetTarget(), backup_size); + + if (backup_ != nullptr) { + intptr_t tail = address_ + backup_size; + intptr_t trampoline = BuildTrampoline(static_cast(tail)); + *backup_ = trampoline; + } + + ScopedMemoryPatch patch(GetTarget(), masm.GetBuffer()->GetStartAddress(), + masm.GetBuffer()->GetSizeInBytes()); + memcpy(GetTarget(), masm.GetBuffer()->GetStartAddress(), + masm.GetBuffer()->GetSizeInBytes()); +} + +intptr_t +ArmInlineHook::BuildTrampoline(u4 tail) { + MacroAssembler masm; + is_thumb_ ? 
masm.UseT32() : masm.UseA32(); + ArmInstructionRewriter rewriter(&masm, backup_code_, GetTarget(), tail, is_thumb_); + rewriter.Rewrite(); + if (is_thumb_) { + tail |= 1; + } + Literal target(static_cast(tail)); + __ Ldr(pc, &target); + __ Place(&target); + + masm.FinalizeCode(); + +#ifdef WHALE_DISASM_AFTER_REWRITE + Disassemble(&masm); +#endif + + size_t size = masm.GetBuffer()->GetSizeInBytes(); + trampoline_addr_ = mmap(nullptr, GetPageSize(), PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + memcpy(trampoline_addr_, masm.GetBuffer()->GetStartAddress(), size); + mprotect(trampoline_addr_, GetPageSize(), PROT_READ | PROT_EXEC); + auto trampoline_addr = reinterpret_cast(trampoline_addr_); + if (is_thumb_) { + trampoline_addr |= 1; + } + return trampoline_addr; +} + + +void ArmInlineHook::StopHook() { + size_t code_size = backup_code_->GetSizeInBytes(); + void *insns = backup_code_->GetInstructions(); + ScopedMemoryPatch patch(GetTarget(), insns, code_size); + memcpy(GetTarget(), insns, code_size); + if (trampoline_addr_ != nullptr) { + munmap(trampoline_addr_, GetPageSize()); + } +} + +} // namespace arm +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.h b/module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.h new file mode 100644 index 00000000..fbc5f1a7 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/inline_hook_arm.h @@ -0,0 +1,38 @@ +#ifndef ARCH_ARM_INLINEHOOK_ARM_H_ +#define ARCH_ARM_INLINEHOOK_ARM_H_ + +#include "dbi/backup_code.h" +#include "dbi/hook_common.h" +#include "base/primitive_types.h" +#include "base/align.h" + +namespace whale { +namespace arm { + +class ArmInlineHook : public InlineHook { + public: + ArmInlineHook(intptr_t address, intptr_t replace, intptr_t *backup) + : InlineHook(address, replace, backup) { + is_thumb_ = static_cast(address & 0x1); + if (is_thumb_) { + address_ = RoundDown(address, 2); + } + } + + void StartHook() override; + + void StopHook() override; + + 
private: + bool is_thumb_; + BackupCode *backup_code_; + void *trampoline_addr_; + + intptr_t BuildTrampoline(u4 tail); +}; + +} // namespace arm +} // namespace whale + +#endif // ARCH_ARM_INLINEHOOK_ARM_H_ + diff --git a/module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.cc b/module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.cc new file mode 100644 index 00000000..257e27f1 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.cc @@ -0,0 +1,406 @@ +#include "assembler/vixl/aarch32/macro-assembler-aarch32.h" +#include "dbi/arm/instruction_rewriter_arm.h" +#include "dbi/arm/decoder_arm.h" +#include "dbi/arm/decoder_thumb.h" +#include "dbi/arm/registers_arm.h" +#include "dbi/backup_code.h" +#include "dbi/instruction_rewriter.h" +#include "dbi/instruction_set.h" +#include "base/macros.h" +#include "base/align.h" +#include "base/primitive_types.h" +#include "base/logging.h" + +#define __ masm_-> +#define UNREACHABLE_BRANCH() LOG(FATAL) << "Unreachable branch"; \ + UNREACHABLE() + + +namespace whale { +namespace arm { + +using namespace vixl::aarch32; // NOLINT + +void arm::ArmInstructionRewriter::Rewrite() { + if (is_thumb_) { + RewriteThumb(); + } else { + RewriteArm(); + } +} + +void ArmInstructionRewriter::RewriteArm() { + u4 pc = cfg_pc_ + 8; + u4 *instructions = code_->GetInstructions(); + for (int i = 0; i < code_->GetCount(); ++i) { + u4 insn = instructions[i]; + ArmInsnType type = DecodeArm(insn); + switch (type) { + case ArmInsnType::kARM_LDR: + RewriteArm_LDR(type, pc, insn); + break; + case ArmInsnType::kARM_ADD: + RewriteArm_Add(type, pc, insn); + break; + case ArmInsnType::kARM_B: + case ArmInsnType::kARM_BL: + RewriteArm_B(type, pc, insn); + break; + default: + EmitA32(insn); + break; + } + pc += 4; + } +} + +void ArmInstructionRewriter::RewriteThumb() { + u4 pc = cfg_pc_ + 4; + InstructionIterator it(code_->GetInstructions(), code_->GetSizeInBytes()); + while (it.HasNext()) { + u2 insn_16 = 
it.GetCurrent(); + u4 insn_32 = 0; + bool is_32bit = Is32BitThumbInstruction(insn_16); + if (is_32bit) { + insn_32 = ((insn_16) << 16) | *(it.GetCurrentRef() + 1); + } + u4 width = is_32bit ? 4 : 2; + ThumbInsnType type = is_32bit ? DecodeThumb32(insn_32) : DecodeThumb16(insn_16); + switch (type) { + case kTHUMB_B_COND16: + RewriteThumb_B_COND16(pc, insn_16); + break; + case kTHUMB_B16: + RewriteThumb_B16(pc, insn_16); + break; + case kTHUMB_BX16: + case kTHUMB_BLX16: + RewriteThumb_BX16(type, pc, insn_16); + break; + case kTHUMB_B32: + case kTHUMB_BL32: + case kTHUMB_BL_ARM32: + RewriteThumb_B32(type, pc, insn_16); + break; + case kTHUMB_CBZ16: + case kTHUMB_CBNZ16: + RewriteThumb_CBZ16(type, pc, insn_16); + break; + case kTHUMB_LDR_PC_16: + RewriteThumb_LDR_PC16(pc, insn_16); + break; + case kTHUMB_ADD_FROM_PC16: + RewriteThumb_ADD_FROM_PC16(pc, insn_16); + break; + case kTHUMB_ADDH16: + case kTHUMB_CMPH16: + case kTHUMB_MOVH16: + RewriteThumb_DataProcessing16(type, pc, insn_16); + break; + case kTHUMB_LDRBL32: + case kTHUMB_LDRHL32: + case kTHUMB_LDRL32: + case kTHUMB_LDRSBL32: + case kTHUMB_LDRSHL32: + RewriteThumb_LDRL32(type, pc, insn_32); + break; + default: + if (is_32bit) { + EmitT32(insn_32); + } else { + EmitT16(insn_16); + } + break; + } // end switch + pc += width; + it.Step(width); + } // end while +} + +/** + * Rewrite Instruction for this scheme: + * ldr rd, [pc, #offset] + */ +void ArmInstructionRewriter::RewriteThumb_LDR_PC16(u4 pc, u2 insn) { + int imm8 = (insn >> 0) & 0xff; + auto rd = Reg(((insn >> 8) & 0x7)); + int offset = imm8 << 2; + u4 pcrel_address = RoundDown(pc + offset, 4); + /* + * How to reproduce this case: + * ldr r0, [pc] + * b #2 + * .word 1234 + * bx lr + */ + if (pcrel_address < tail_pc_) { + EmitT16(insn); + } else { + __ Mov(rd, pcrel_address); + __ Ldr(rd, MemOperand(rd, 0)); + } +} + +/** + * Rewrite Instruction for the following scheme: + * pcrel add, cmp, mov + */ +void 
+ArmInstructionRewriter::RewriteThumb_DataProcessing16(ThumbInsnType type, u4 cfg_pc, u2 insn) { + int dn = (insn >> 7) & 0x1; + auto rm = Reg((insn >> 3) & 0xf); + auto rd = Reg(((insn >> 0) & 0x7) | (dn << 3)); + + if (!rd.IsPC() && !rm.IsPC()) { + EmitT16(insn); + return; + } + if (rd.Is(rm)) { + EmitT16(insn); + } else if (rm.IsPC()) { + auto scratch_reg = rd.Is(r0) ? r1 : r0; + __ Push(scratch_reg); + __ Mov(scratch_reg, cfg_pc); + switch (type) { + case kTHUMB_ADDH16: + __ Add(rd, rd, scratch_reg); + break; + case kTHUMB_CMPH16: + __ Cmp(rd, scratch_reg); + break; + case kTHUMB_MOVH16: + __ Mov(rd, scratch_reg); + break; + default: + UNREACHABLE_BRANCH(); + } + __ Pop(scratch_reg); + } else { // rd == pc + Register scratch_reg = (rm.Is(r0)) ? r1 : r0; + __ Push(scratch_reg); + __ Mov(scratch_reg, cfg_pc); + switch (type) { + case kTHUMB_ADDH16: + __ Add(pc, pc, scratch_reg); + break; + case kTHUMB_CMPH16: + __ Cmp(pc, scratch_reg); + break; + case kTHUMB_MOVH16: + __ Mov(pc, scratch_reg); + break; + default: + UNREACHABLE_BRANCH(); + } + __ Pop(scratch_reg); + } +} + +void ArmInstructionRewriter::RewriteThumb_LDRL32(ThumbInsnType type, u4 pc, u4 insn) { + Register rt = Reg((insn >> 12) & 0xf); + u4 imm12 = (insn >> 0) & 0xfff; + u4 upwards = (insn >> 23) & 0x1; + u4 pcrel_address = RoundDown((pc + (upwards ? 
imm12 : -imm12)), 4); + if (pcrel_address < tail_pc_) { + EmitT32(insn); + return; + } + __ Mov(rt, pcrel_address); + switch (type) { + case kTHUMB_LDRBL32: + __ Ldrb(rt, MemOperand(rt, 0)); + break; + case kTHUMB_LDRHL32: + __ Ldrh(rt, MemOperand(rt, 0)); + break; + case kTHUMB_LDRL32: + __ Ldr(rt, MemOperand(rt, 0)); + break; + case kTHUMB_LDRSBL32: + __ Ldrsb(rt, MemOperand(rt, 0)); + break; + case kTHUMB_LDRSHL32: + __ Ldrsh(rt, MemOperand(rt, 0)); + break; + default: + UNREACHABLE_BRANCH(); + } +} + +void ArmInstructionRewriter::RewriteThumb_B_COND16(u4 cfg_pc, u2 insn) { + u2 cond = (insn >> 8) & 0xf; + int imm8 = (insn >> 0) & 0xff; + int branch_offset = (static_cast(imm8)) << 1; + u4 pcrel_address = RoundDown(cfg_pc + branch_offset, 4) | 1; + + if (pcrel_address < tail_pc_) { + EmitT16(insn); + return; + } + if (cond == al) { + __ Push(r0); + __ Mov(r0, pcrel_address); + __ Mov(pc, r0); + __ Pop(r0); + } else { + Label true_label, false_label; + __ B(Condition(cond), &true_label); + __ B(&false_label); + __ Bind(&true_label); + __ Push(r0); + __ Mov(r0, pcrel_address); + __ Mov(pc, r0); + __ Pop(r0); + __ Bind(&false_label); + } + masm_->FinalizeCode(); +} + +void ArmInstructionRewriter::RewriteThumb_B16(u4 cfg_pc, u2 insn) { + int imm11 = (insn >> 0) & 0x7ff; + int branch_offset = (imm11 & 0x400) ? 0xFFFFF000 : 0; + branch_offset |= imm11 << 1; + u4 pcrel_address = RoundDown(cfg_pc + branch_offset, 4); + if (pcrel_address < tail_pc_) { + EmitT16(insn); + return; + } + Literal target(pcrel_address | 1); + Label skip_literal; + __ Ldr(pc, &target); + __ B(&skip_literal); + __ Place(&target); + __ Bind(&skip_literal); + masm_->FinalizeCode(); +} + + +void ArmInstructionRewriter::RewriteThumb_BX16(ThumbInsnType type, u4 pc, u2 insn) { + Register rm = Reg((insn >> 3) & 0xf); + if (rm.IsPC()) { + // This instruction may cause the CPU status change. + // Thumb ========> ARM + // keep an eye on this. 
+ LOG(WARNING) << "Un-rewritten instruction: bx pc"; + } + EmitT16(insn); +} + +void ArmInstructionRewriter::RewriteThumb_B32(ThumbInsnType type, u4 cfg_pc, u4 insn) { + int sign_bit = (insn >> 26) & 0x1; + int offset_high = (insn >> 16) & 0x3ff; + int link = (insn >> 14) & 0x1; + int j1 = (insn >> 13) & 0x1; + int thumb_mode = (insn >> 12) & 0x1; + int j2 = (insn >> 11) & 0x1; + int offset_low = (insn >> 0) & 0x7ff; + int branch_offset = sign_bit ? 0xFF000000 : 0; + branch_offset |= (j1 ^ sign_bit) ? 0 : 1 << 23; + branch_offset |= (j2 ^ sign_bit) ? 0 : 1 << 22; + branch_offset |= offset_high << 12; + branch_offset |= offset_low << 1; + u4 pcrel_address = cfg_pc + branch_offset; + if (pcrel_address < tail_pc_) { + EmitT32(insn); + } else { + if (link) { + __ Add(lr, pc, 4); + } + if (thumb_mode) { + pcrel_address |= 1; + } + Literal target(pcrel_address); + Label skip_literal; + __ Ldr(pc, &target); + __ B(&skip_literal); + __ Place(&target); + __ Bind(&skip_literal); + masm_->FinalizeCode(); + } +} + +void ArmInstructionRewriter::RewriteThumb_CBZ16(ThumbInsnType type, u4 cfg_pc, u2 insn) { + int n = (insn >> 11) & 0x1; + int imm1 = (insn >> 9) & 0x1; + int imm5 = (insn >> 3) & 0x1f; + Register rn = Reg((insn >> 0) & 0x7); + int branch_offset = (imm1 << 6) | (imm5 << 1); + u4 pcrel_address = RoundDown(cfg_pc + branch_offset, 4) | 1; + if (pcrel_address < tail_pc_) { + EmitT16(insn); + return; + } + Label true_label, false_label; + __ Cbz(rn, &true_label); + __ B(&false_label); + __ Bind(&true_label); + __ Push(r0); + __ Mov(r0, pcrel_address); + __ Mov(pc, r0); + __ Pop(r0); + __ Bind(&false_label); + masm_->FinalizeCode(); +} + +void ArmInstructionRewriter::RewriteThumb_ADD_FROM_PC16(u4 pc, u2 insn) { + Register rd = Reg((insn >> 8) & 0x7); + int imm8 = (insn >> 0) & 0xff; + int offset = imm8 << 2; + u4 pcrel_address = RoundDown(pc + offset, 4); + __ Mov(rd, pcrel_address); +} + +void ArmInstructionRewriter::RewriteArm_LDR(ArmInsnType type, u4 pc, u4 insn) { + 
int updown = (insn >> 23) & 0x1; + int imm = insn & 0xFFF; + Register rd = Reg((insn & 0xF000) >> 12); + u4 pcrel_address = updown ? pc + imm : pc - imm; + if (pcrel_address < tail_pc_) { + EmitA32(insn); + } else { + __ Mov(rd, pcrel_address); + __ Ldr(rd, MemOperand(rd, 0)); + masm_->FinalizeCode(); + } +} + +void ArmInstructionRewriter::RewriteArm_Add(ArmInsnType type, u4 pc, u4 insn) { + Register rd = Reg((insn & 0xF000) >> 12); + Register rm = Reg(insn & 0xF); + Register scratch_reg = ip; + for (unsigned reg = 0; reg < R7; ++reg) { + if (rd.GetCode() != reg && rm.GetCode() != reg) { + scratch_reg = Reg(reg); + break; + } + } + __ Push(scratch_reg); + __ Mov(scratch_reg, pc); + __ Add(rd, scratch_reg, rd); + __ Pop(scratch_reg); +} + +void ArmInstructionRewriter::RewriteArm_B(ArmInsnType type, u4 cfg_pc, u4 insn) { + u4 offset = (insn >> 0) & 0xffffff; + u4 branch_offset = (offset & 0x800000) ? 0xFC000000 : 0; + branch_offset |= (offset << 2); + u4 pcrel_address = cfg_pc + branch_offset; + if (pcrel_address < tail_pc_) { + EmitA32(insn); + return; + } + if (type == ArmInsnType::kARM_BL) { + __ Add(lr, pc, 4); + } + Literal target(pcrel_address); + Label skip_literal; + __ Ldr(pc, &target); + __ B(&skip_literal); + __ Place(&target); + __ Bind(&skip_literal); + masm_->FinalizeCode(); +} + + +} // namespace arm +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.h b/module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.h new file mode 100644 index 00000000..c36c0cb8 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/instruction_rewriter_arm.h @@ -0,0 +1,133 @@ +#ifndef ARCH_REWRITER_ARM_H_ +#define ARCH_REWRITER_ARM_H_ + +#include "assembler/vixl/aarch32/macro-assembler-aarch32.h" +#include "dbi/arm/decoder_thumb.h" +#include "dbi/backup_code.h" +#include "dbi/instruction_rewriter.h" +#include "dbi/instruction_set.h" +#include "base/macros.h" +#include "base/primitive_types.h" +#include "decoder_arm.h" 
+ +namespace whale { +namespace arm { + +class InstructionIterator { + public: + explicit InstructionIterator(intptr_t insn, size_t size) : insns_(insn), index_(0), + size_(size) {} + + ~InstructionIterator() = default; + + template + T *GetCurrentRef() { + return reinterpret_cast(insns_ + index_); + } + + template + T GetCurrent() { + return *GetCurrentRef(); + } + + bool HasNext() { + return index_ < size_; + } + + void Step(u4 step) { + DCHECK_LE(index_ + step, size_); + index_ += step; + } + + u4 GetIndex() { + return index_; + } + + private: + intptr_t insns_; + u4 index_; + size_t size_; +}; + +class ArmInstructionRewriter : public InstructionReWriter { + public: + ArmInstructionRewriter(vixl::aarch32::MacroAssembler *masm, BackupCode *code, + u4 origin_pc, u4 tail_pc, bool is_thumb) + : masm_(masm), code_(code), cfg_pc_(origin_pc), tail_pc_(tail_pc), + is_thumb_(is_thumb) {} + + ~ArmInstructionRewriter() { + delete code_; + } + + const InstructionSet GetISA() override { + return InstructionSet::kArm; + } + + void Rewrite() override; + + u4 *GetStartAddress() override { + return masm_->GetBuffer()->GetStartAddress(); + } + + size_t GetCodeSize() override { + return masm_->GetBuffer()->GetSizeInBytes(); + } + + private: + bool is_thumb_; + const u4 cfg_pc_; + const u4 tail_pc_; + vixl::aarch32::MacroAssembler *masm_; + BackupCode *code_; + + void EmitT16(const u2 insn) { + masm_->GetBuffer()->Emit16(insn); + } + + void EmitT32(const u4 insn) { + EmitT16(static_cast(insn >> 16)); + EmitT16(static_cast(insn & 0xffff)); + } + + void EmitA32(const u4 insn) { + masm_->GetBuffer()->Emit32(insn); + } + + void RewriteThumb(); + + void RewriteArm(); + + void RewriteThumb_LDR_PC16(u4 pc, u2 insn); + + void RewriteThumb_DataProcessing16(ThumbInsnType type, u4 cfg_pc, u2 insn); + + void RewriteThumb_B_COND16(u4 cfg_pc, u2 insn); + + void RewriteThumb_B16(u4 pc, u2 insn); + + void RewriteThumb_ADD_FROM_PC16(u4 pc, u2 insn); + + void RewriteThumb_BX16(ThumbInsnType type, u4 
pc, u2 insn); + + void RewriteThumb_CBZ16(ThumbInsnType type, u4 pc, u2 insn); + + void RewriteThumb_LDRL32(ThumbInsnType type, u4 pc, u4 insn); + + void RewriteThumb_B32(ThumbInsnType type, u4 pc, u4 insn); + + void RewriteArm_LDR(ArmInsnType type, u4 pc, u4 insn); + + void RewriteArm_Add(ArmInsnType type, u4 pc, u4 insn); + + void RewriteArm_B(ArmInsnType type, u4 pc, u4 insn); +}; + + +} // namespace arm +} // namespace whale + + +#endif // ARCH_REWRITER_ARM_H_ + + diff --git a/module/src/main/cpp/whale/src/dbi/arm/registers_arm.h b/module/src/main/cpp/whale/src/dbi/arm/registers_arm.h new file mode 100644 index 00000000..9114a26b --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm/registers_arm.h @@ -0,0 +1,73 @@ +#ifndef ARM64_ARM_CONVENTION_H_ +#define ARM64_ARM_CONVENTION_H_ + +#include +#include "assembler/vixl/aarch32/operands-aarch32.h" + +namespace whale { +namespace arm { + + +enum ArmRegister { + R0 = 0, + R1 = 1, + R2 = 2, + R3 = 3, + R4 = 4, + R5 = 5, + R6 = 6, + R7 = 7, + R8 = 8, + R9 = 9, + R10 = 10, + R11 = 11, + R12 = 12, + R13 = 13, + R14 = 14, + R15 = 15, + MR = 8, + TR = 9, + FP = 11, + IP = 12, + SP = 13, + LR = 14, + PC = 15, + kNumberOfCoreRegisters = 16, + kNoRegister = -1, +}; + +const vixl::aarch32::Register &Reg(int reg) { +#define CASE(x) case R##x: \ + return vixl::aarch32::r##x; + + switch (reg) { + CASE(0) + CASE(1) + CASE(2) + CASE(3) + CASE(4) + CASE(5) + CASE(6) + CASE(7) + CASE(8) + CASE(9) + CASE(10) + CASE(11) + CASE(12) + CASE(13) + CASE(14) + CASE(15) + default: + LOG(FATAL) << "Unexpected register : " << reg; + UNREACHABLE(); + } +#undef CASE +} + +} // namespace arm +} // namespace whale + + +#endif // ARM64_ARM_CONVENTION_H_ + + diff --git a/module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.cc b/module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.cc new file mode 100644 index 00000000..884b853d --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.cc @@ -0,0 +1,23 @@ +#include 
"dbi/arm64/decoder_arm64.h" +#include "base/macros.h" + +#define CASE(mask, val, type) \ +if ((((insn) & (mask)) == val)) { \ + return type; \ +} + +namespace whale { +namespace arm64 { + +A64InsnType DecodeA64(u4 insn) { + CASE(0x7e000000, 0x34000000, kA64_CBZ_CBNZ); + CASE(0xff000010, 0x54000000, kA64_B_COND); + CASE(0x7e000000, 0x36000000, kA64_TBZ_TBNZ); + CASE(0x7c000000, 0x14000000, kA64_B_BL); + CASE(0x3b000000, 0x18000000, kA64_LDR_LIT); + CASE(0x1f000000, 0x10000000, kA64_ADR_ADRP); + return kA64_UNHANDLED; +} + +} // namespace arm64 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.h b/module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.h new file mode 100644 index 00000000..5ca71de2 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm64/decoder_arm64.h @@ -0,0 +1,25 @@ +#ifndef ARCH_ARM64_DECODER_H_ +#define ARCH_ARM64_DECODER_H_ + +#include "base/primitive_types.h" + +namespace whale { +namespace arm64 { + +enum A64InsnType { + kA64_CBZ_CBNZ, + kA64_B_COND, + kA64_TBZ_TBNZ, + kA64_B_BL, + kA64_LDR_LIT, + kA64_ADR_ADRP, + kA64_UNHANDLED +}; + +A64InsnType DecodeA64(u4 insn); + +} // namespace arm64 +} // namespace whale + +#endif // ARCH_ARM64_DECODER_H_ + diff --git a/module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.cc b/module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.cc new file mode 100644 index 00000000..1d4ea8b2 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.cc @@ -0,0 +1,77 @@ +#include +#include "platform/memory.h" +#include "dbi/arm64/inline_hook_arm64.h" +#include "dbi/arm64/registers_arm64.h" +#include "dbi/arm64/instruction_rewriter_arm64.h" +#include "assembler/vixl/aarch64/macro-assembler-aarch64.h" +#include "base/align.h" +#include "base/logging.h" + +#define __ masm. 
+ +namespace whale { +namespace arm64 { + +using namespace vixl::aarch64; // NOLINT + + +void Arm64InlineHook::StartHook() { + DCHECK(address_ != 0 && replace_ != 0); + MacroAssembler masm; + + __ Mov(xTarget, GetReplaceAddress()); + __ Br(xTarget); + + masm.FinalizeCode(); + + size_t backup_size = masm.GetSizeOfCodeGenerated(); + backup_code_ = new BackupCode(GetTarget(), backup_size); + + if (backup_ != nullptr) { + intptr_t tail = address_ + backup_size; + intptr_t trampoline = BuildTrampoline(static_cast(tail)); + *backup_ = trampoline; + } + + ScopedMemoryPatch patch( + GetTarget(), + masm.GetBuffer()->GetStartAddress(), + backup_size + ); +} + +intptr_t +Arm64InlineHook::BuildTrampoline(u8 tail) { + MacroAssembler masm; + + Arm64InstructionRewriter rewriter(&masm, backup_code_, GetTarget(), tail); + rewriter.Rewrite(); + + __ Mov(xTarget, tail); + __ Br(xTarget); + + masm.FinalizeCode(); + + size_t size = masm.GetBuffer()->GetSizeInBytes(); + + trampoline_addr_ = mmap(nullptr, GetPageSize(), PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + memcpy(trampoline_addr_, masm.GetBuffer()->GetStartAddress(), size); + mprotect(trampoline_addr_, GetPageSize(), PROT_READ | PROT_EXEC); + + return reinterpret_cast(trampoline_addr_); +} + + +void Arm64InlineHook::StopHook() { + size_t code_size = backup_code_->GetSizeInBytes(); + void *insns = backup_code_->GetInstructions(); + ScopedMemoryPatch patch(GetTarget(), insns, code_size); + memcpy(GetTarget(), insns, code_size); + if (trampoline_addr_ != nullptr) { + munmap(trampoline_addr_, GetPageSize()); + } +} + +} // namespace arm64 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.h b/module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.h new file mode 100644 index 00000000..2308d7d9 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm64/inline_hook_arm64.h @@ -0,0 +1,35 @@ +#ifndef ARCH_ARM64_INLINEHOOK_ARM64_H_ +#define ARCH_ARM64_INLINEHOOK_ARM64_H_ + 
+#include "dbi/backup_code.h" +#include "dbi/hook_common.h" +#include "base/primitive_types.h" + +namespace whale { +namespace arm64 { + +class Arm64InlineHook : public InlineHook { + public: + Arm64InlineHook(intptr_t address, intptr_t replace, intptr_t *backup) + : InlineHook(address, replace, backup), backup_code_(nullptr), trampoline_addr_(nullptr) {} + + ~Arm64InlineHook() override { + delete backup_code_; + } + + void StartHook() override; + + void StopHook() override; + + private: + BackupCode *backup_code_; + void *trampoline_addr_; + + intptr_t BuildTrampoline(u8 tail); +}; + +} // namespace arm64 +} // namespace whale + +#endif // ARCH_ARM64_INLINEHOOK_ARM64_H_ + diff --git a/module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.cc b/module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.cc new file mode 100644 index 00000000..0c3ac88e --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.cc @@ -0,0 +1,256 @@ +#include "dbi/arm64/instruction_rewriter_arm64.h" +#include "dbi/arm64/decoder_arm64.h" +#include "dbi/arm64/registers_arm64.h" +#include "base/logging.h" + +#define __ masm_-> + +namespace whale { +namespace arm64 { +using namespace vixl::aarch64; // NOLINT + +static inline u8 SignExtend64(unsigned int bits, uint64_t value) { + u8 C = (u8) ((-1) << (bits - (u8) 1)); // NOLINT + return (value + C) ^ C; +} + +void Arm64InstructionRewriter::Rewrite() { + u8 current_pc = cfg_pc_; + u4 *insns = code_->GetInstructions(); + for (int i = 0; i < code_->GetCount(); ++i) { + u4 insn = insns[i]; + A64InsnType type = DecodeA64(insn); + switch (type) { + case A64InsnType::kA64_ADR_ADRP: + RewriteADR_ADRP(current_pc, insn); + break; + case A64InsnType::kA64_B_BL: + RewriteB_BL(current_pc, insn); + break; + case A64InsnType::kA64_B_COND: + RewriteB_COND(current_pc, insn); + break; + case A64InsnType::kA64_CBZ_CBNZ: + RewriteCBZ_CBNZ(current_pc, insn); + break; + case A64InsnType::kA64_TBZ_TBNZ: + 
RewriteTBZ_TBNZ(current_pc, insn); + break; + case A64InsnType::kA64_LDR_LIT: + RewriteLDR_LIT(current_pc, insn); + break; + default: + EmitCode(insn); + break; + } + current_pc += kArm64InstructionAlignment; + } + masm_->FinalizeCode(); +} + +ALWAYS_INLINE void Arm64InstructionRewriter::RewriteADR_ADRP(u8 pc, u4 insn) { + u4 op = (insn >> 31) & 0x1; + u4 immlo = (insn >> 29) & 0x3; + u4 immhi = (insn >> 5) & 0x7ffff; + auto rd = XReg((insn >> 0) & 0x1f); + u8 imm = SignExtend64(12, (immhi << 2) | immlo); + u8 pcrel_address; + if (op == 0) { // adr + pcrel_address = (uint64_t) pc; + } else { // adrp + imm = imm << 12; + pcrel_address = (uint64_t) pc & ~0xFFF; + } + pcrel_address += imm; + __ Mov(rd, pcrel_address); +} + +ALWAYS_INLINE void Arm64InstructionRewriter::RewriteB_BL(u8 pc, u4 insn) { + u4 op = (insn >> 31) & 0x1; + u4 imm26 = (insn >> 0) & 0x3ffffff; + u8 branch_offset = (SignExtend64(26, imm26) << 2); + u8 target = pc + branch_offset; + + // Check if the jump target still in rewrite range of code, + // in this cause, there is no need to rewrite it. + if (target < tail_pc_) { + EmitCode(insn); + } else { + if (op == 1) { // bl + __ Mov(lr, pc + kArm64InstructionAlignment); + } + __ Mov(xTarget, target); + __ Br(xTarget); + } +} + +ALWAYS_INLINE void Arm64InstructionRewriter::RewriteCBZ_CBNZ(u8 pc, u4 insn) { + u4 sf = (insn >> 31) & 0x1; + u4 op = (insn >> 24) & 0x1; + u4 imm19 = (insn >> 5) & 0x7ffff; + u4 rt_value = (insn >> 0) & 0x1f; + u8 branch_offset = SignExtend64(19, imm19) << 2; + u8 pcrel_address = pc + branch_offset; + + if (pcrel_address < tail_pc_) { + EmitCode(insn); + } else { + auto rt = sf ? 
XReg(rt_value) : WReg(rt_value); + + Label true_label, false_label; + + if (op == 1) { + __ Cbnz(rt, &true_label); + } else { + __ Cbz(rt, &true_label); + } + __ B(&false_label); + + __ Bind(&true_label); + __ Mov(xTarget, pcrel_address); + __ Br(xTarget); + + __ Bind(&false_label); + + masm_->FinalizeCode(); + } +} + +void Arm64InstructionRewriter::RewriteB_COND(u8 pc, u4 insn) { + u4 imm19 = (insn >> 5) & 0x7ffff; + u4 cond = (insn >> 0) & 0xf; + u8 branch_offset = SignExtend64(19, imm19) << 2; + u8 pcrel_address = pc + branch_offset; + + if (pcrel_address < tail_pc_) { + EmitCode(insn); + } else { + Label true_label, false_label; + + __ B(Condition(cond), &true_label); + __ B(&false_label); + + __ Bind(&true_label); + __ Mov(xTarget, pcrel_address); + __ Br(xTarget); + + __ Bind(&false_label); + } +} + +ALWAYS_INLINE void Arm64InstructionRewriter::RewriteTBZ_TBNZ(u8 pc, u4 insn) { + u4 b5 = (insn >> 31) & 0x1; + u4 op = (insn >> 24) & 0x1; + u4 b40 = (insn >> 19) & 0x1f; + u4 imm14 = (insn >> 5) & 0x3fff; + u4 rt_value = (insn >> 0) & 0x1f; + u8 branch_offset = SignExtend64(14, imm14) << 2; + u4 bit = (b5 << 5) | b40; + u8 pcrel_address = pc + branch_offset; + + if (pcrel_address < tail_pc_) { + EmitCode(insn); + } else { + auto rt = b5 ? 
XReg(rt_value) : WReg(rt_value); + + Label true_label, false_label; + + if (op == 1) { + __ Tbnz(rt, bit, &true_label); + } else { + __ Tbz(rt, bit, &true_label); + } + __ B(&false_label); + + __ Bind(&true_label); + __ Mov(xTarget, pcrel_address); + __ Br(xTarget); + + __ Bind(&false_label); + } +} + +/* + * LOAD LITERAL + * ---------------------------------------------------------------- + * opc V Instruction Variant + * ---------------------------------------------------------------- + * 00 0 LDR (literal) 32-bit variant on page C6-527 + * 01 0 LDR (literal) 64-bit variant on page C6-527 + * 10 0 LDRSW (literal) - + * 11 0 PRFM (literal) - + * + * 00 1 LDR (literal, SIMD&FP) 32-bit variant on page C7-1027 + * 01 1 LDR (literal, SIMD&FP) 64-bit variant on page C7-1027 + * 10 1 LDR (literal, SIMD&FP) 128-bit variant on page C7-1027 + */ +ALWAYS_INLINE void Arm64InstructionRewriter::RewriteLDR_LIT(u8 pc, u4 insn) { + u4 opc = (insn >> 30) & 0x3; + u4 v = (insn >> 26) & 0x1; + u4 imm19 = (insn >> 5) & 0x7ffff; + u4 rt = (insn >> 0) & 0x1f; + uint64_t offset = SignExtend64(19, imm19) << 2; + u8 pcrel_address = pc + offset; + + if (pcrel_address < tail_pc_) { + EmitCode(insn); + return; + } + if (v == 0) { + auto wt = WReg(rt); + auto xt = XReg(rt); + switch (opc) { + case 0: // LDR literal 32-bit variant + __ Mov(xt, pcrel_address); + __ Ldr(wt, MemOperand(wt, 0, Offset)); + break; + case 1: // LDR literal 64-bit variant + __ Mov(xt, pcrel_address); + __ Ldr(xt, MemOperand(xt, 0, Offset)); + break; + case 2: // LDR Signed Word (literal) + __ Mov(xt, pcrel_address); + __ Ldrsw(xt, MemOperand(xt, 0, Offset)); + break; + case 3: // PRFM Prefetch + __ Push(x0); + __ Mov(x0, pcrel_address); + __ Ldrsw(x0, MemOperand(xt, 0, Offset)); + __ Pop(x0); + break; + default: + LOG(WARNING) << "Unallocated ldr(literal) opc : " << opc; + EmitCode(insn); + break; + } + } else if (v == 1) { + __ Push(x0); + __ Mov(x0, pcrel_address); + switch (opc) { + case 0: // LDR (literal, 
SIMD&FP) 32-bit variant + __ Ldr(SReg(0), MemOperand(x0, 0, Offset)); + break; + case 1: // LDR (literal, SIMD&FP) 64-bit variant + __ Ldr(DReg(0), MemOperand(x0, 0, Offset)); + break; + case 2: // LDR (literal, SIMD&FP) 128-bit variant + __ Ldr(VReg(0), MemOperand(x0, 0, Offset)); + break; + default: + LOG(WARNING) << "Unallocated ldr(literal, SIMD&FP) opc : " << opc; + EmitCode(insn); + break; + } + __ Pop(x0); + + } else { + LOG(WARNING) << "Unallocated ldr_literal v : " << v; + EmitCode(insn); + } +} + + +} // namespace arm64 +} // namespace whale + + diff --git a/module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.h b/module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.h new file mode 100644 index 00000000..5d142aa6 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm64/instruction_rewriter_arm64.h @@ -0,0 +1,66 @@ +#ifndef ARCH_REWRITER_ARM64_H_ +#define ARCH_REWRITER_ARM64_H_ + +#include "assembler/vixl/aarch64/macro-assembler-aarch64.h" +#include "dbi/backup_code.h" +#include "dbi/instruction_rewriter.h" +#include "dbi/instruction_set.h" +#include "base/primitive_types.h" +#include "base/macros.h" + +namespace whale { +namespace arm64 { + +class Arm64InstructionRewriter : public InstructionReWriter { + public: + Arm64InstructionRewriter(vixl::aarch64::MacroAssembler *masm, BackupCode *code, + u8 origin_pc, u8 tail_pc) + : masm_(masm), code_(code), cfg_pc_(origin_pc), tail_pc_(tail_pc) {} + + ~Arm64InstructionRewriter() = default; + + const InstructionSet GetISA() override { + return InstructionSet::kArm64; + } + + void Rewrite() override; + + ALWAYS_INLINE u4 *GetStartAddress() override { + return masm_->GetBuffer()->GetStartAddress(); + } + + ALWAYS_INLINE size_t GetCodeSize() override { + return masm_->GetBuffer()->GetSizeInBytes(); + } + + private: + const u8 cfg_pc_; + const u8 tail_pc_; + vixl::aarch64::MacroAssembler *masm_; + BackupCode *code_; + + ALWAYS_INLINE void EmitCode(u4 insn) { + 
masm_->GetBuffer()->Emit32(insn); + } + + void RewriteADR_ADRP(u8 pc, u4 insn); + + void RewriteB_BL(u8 pc, u4 insn); + + void RewriteCBZ_CBNZ(u8 pc, u4 insn); + + void RewriteB_COND(u8 pc, u4 insn); + + void RewriteTBZ_TBNZ(u8 pc, u4 insn); + + void RewriteLDR_LIT(u8 pc, u4 insn); +}; + + +} // namespace arm64 +} // namespace whale + + +#endif // ARCH_REWRITER_ARM64_H_ + + diff --git a/module/src/main/cpp/whale/src/dbi/arm64/registers_arm64.h b/module/src/main/cpp/whale/src/dbi/arm64/registers_arm64.h new file mode 100644 index 00000000..497ed3e5 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/arm64/registers_arm64.h @@ -0,0 +1,230 @@ +#ifndef ARM64_ARM64_CONVENTION_H_ +#define ARM64_ARM64_CONVENTION_H_ + +#include +#include "assembler/vixl/aarch64/operands-aarch64.h" + +namespace whale { +namespace arm64 { + +enum XRegister { + X0 = 0, + X1 = 1, + X2 = 2, + X3 = 3, + X4 = 4, + X5 = 5, + X6 = 6, + X7 = 7, + X8 = 8, + X9 = 9, + X10 = 10, + X11 = 11, + X12 = 12, + X13 = 13, + X14 = 14, + X15 = 15, + X16 = 16, + X17 = 17, + X18 = 18, + X19 = 19, + X20 = 20, + X21 = 21, + X22 = 22, + X23 = 23, + X24 = 24, + X25 = 25, + X26 = 26, + X27 = 27, + X28 = 28, + X29 = 29, + X30 = 30, + SP = 31, + XZR = 32, + kNumberOfXRegisters = 33, + FP = X29, + LR = X30, + kNoRegister = -1, +}; + +enum WRegister { + W0 = 0, + W1 = 1, + W2 = 2, + W3 = 3, + W4 = 4, + W5 = 5, + W6 = 6, + W7 = 7, + W8 = 8, + W9 = 9, + W10 = 10, + W11 = 11, + W12 = 12, + W13 = 13, + W14 = 14, + W15 = 15, + W16 = 16, + W17 = 17, + W18 = 18, + W19 = 19, + W20 = 20, + W21 = 21, + W22 = 22, + W23 = 23, + W24 = 24, + W25 = 25, + W26 = 26, + W27 = 27, + W28 = 28, + W29 = 29, + W30 = 30, + WSP = 31, + WZR = 32, + kNumberOfWRegisters = 33, + kNoWRegister = -1, +}; + +enum DRegister { + D0 = 0, + D1 = 1, + D2 = 2, + D3 = 3, + D4 = 4, + D5 = 5, + D6 = 6, + D7 = 7, + D8 = 8, + D9 = 9, + D10 = 10, + D11 = 11, + D12 = 12, + D13 = 13, + D14 = 14, + D15 = 15, + D16 = 16, + D17 = 17, + D18 = 18, + D19 = 19, + D20 = 20, + 
D21 = 21, + D22 = 22, + D23 = 23, + D24 = 24, + D25 = 25, + D26 = 26, + D27 = 27, + D28 = 28, + D29 = 29, + D30 = 30, + D31 = 31, + kNumberOfDRegisters = 32, + kNoDRegister = -1, +}; + +std::ostream &operator<<(std::ostream &os, const DRegister &rhs); + +enum SRegister { + S0 = 0, + S1 = 1, + S2 = 2, + S3 = 3, + S4 = 4, + S5 = 5, + S6 = 6, + S7 = 7, + S8 = 8, + S9 = 9, + S10 = 10, + S11 = 11, + S12 = 12, + S13 = 13, + S14 = 14, + S15 = 15, + S16 = 16, + S17 = 17, + S18 = 18, + S19 = 19, + S20 = 20, + S21 = 21, + S22 = 22, + S23 = 23, + S24 = 24, + S25 = 25, + S26 = 26, + S27 = 27, + S28 = 28, + S29 = 29, + S30 = 30, + S31 = 31, + kNumberOfSRegisters = 32, + kNoSRegister = -1, +}; + +enum VRegister { + V0 = 0, + V1 = 1, + V2 = 2, + V3 = 3, + V4 = 4, + V5 = 5, + V6 = 6, + V7 = 7, + V8 = 8, + V9 = 9, + V10 = 10, + V11 = 11, + V12 = 12, + V13 = 13, + V14 = 14, + V15 = 15, + V16 = 16, + V17 = 17, + V18 = 18, + V19 = 19, + V20 = 20, + V21 = 21, + V22 = 22, + V23 = 23, + V24 = 24, + V25 = 25, + V26 = 26, + V27 = 27, + V28 = 28, + V29 = 29, + V30 = 30, + V31 = 31, + kNumberOfVRegisters = 32, + kNoVRegister = -1, +}; + +static vixl::aarch64::XRegister xTarget = vixl::aarch64::x17; + + +inline const vixl::aarch64::Register &XReg(unsigned int reg) { + return vixl::aarch64::XRegister::GetXRegFromCode(reg); +} + +inline const vixl::aarch64::Register &WReg(unsigned int reg) { + return vixl::aarch64::XRegister::GetWRegFromCode(reg); +} + +inline const vixl::aarch64::VRegister &SReg(unsigned int reg) { + return vixl::aarch64::FPRegister::GetSRegFromCode(reg); +} + +inline const vixl::aarch64::VRegister &DReg(unsigned int reg) { + return vixl::aarch64::FPRegister::GetDRegFromCode(reg); +} + +inline const vixl::aarch64::VRegister &VReg(unsigned int reg) { + return vixl::aarch64::FPRegister::GetVRegFromCode(reg); +} + +} // namespace arm64 +} // namespace whale + + +#endif // ARM64_ARM64_CONVENTION_H_ + + diff --git a/module/src/main/cpp/whale/src/dbi/backup_code.h 
b/module/src/main/cpp/whale/src/dbi/backup_code.h new file mode 100644 index 00000000..8baafabc --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/backup_code.h @@ -0,0 +1,56 @@ +#ifndef ARCH_BACKUP_CODE_H_ +#define ARCH_BACKUP_CODE_H_ + +#include +#include "base/primitive_types.h" +#include "base/macros.h" +#include "base/logging.h" + + +namespace whale { + +class BackupCode { + public: + BackupCode(const void *address, const size_t size) : size_(size) { + insns_ = malloc(size); + memcpy(insns_, address, size); + } + + ~BackupCode() { + free(insns_); + } + + size_t GetSizeInBytes() { + return size_; + } + + size_t GetCount(size_t insn_size) { + return size_ / insn_size; + } + + template + size_t GetCount() { + return GetCount(sizeof(T)); + } + + template + T *GetInstructions() { + return reinterpret_cast(insns_); + } + + intptr_t GetInstructions() { + return reinterpret_cast(insns_); + } + + private: + void *insns_; + size_t size_; + + private: + DISALLOW_COPY_AND_ASSIGN(BackupCode); +}; + +} // namespace whale + +#endif // ARCH_BACKUP_CODE_H_ + diff --git a/module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.cc b/module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.cc new file mode 100644 index 00000000..6c9bf94d --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.cc @@ -0,0 +1,128 @@ +#include +#include "interceptor.h" +#include "dbi/darwin/macho_import_hook.h" +#include "macho_import_hook.h" + + +namespace whale { +namespace darwin { + +static void NewDylibCallback(const struct mach_header *mh, intptr_t vmaddr_slide) { + Interceptor::Instance()->TraverseHooks([&](std::unique_ptr &hook) { + if (hook->GetType() == HookType::kImportHook) { + MachoImportHook *import_hook = dynamic_cast(hook.get()); + import_hook->ImportHookOneMachO(reinterpret_cast(mh), vmaddr_slide); + } + }); +}; + +void MachoImportHook::StartHook() { + static Singleton register_dyld_once([](void *instance) -> void { + 
_dyld_register_func_for_add_image(NewDylibCallback); + }); + register_dyld_once.Ensure(); + for (u4 i = 0; i < _dyld_image_count(); ++i) { + ImportHookOneMachO( + reinterpret_cast(_dyld_get_image_header(i)), + _dyld_get_image_vmaddr_slide(i) + ); + } +} + +void MachoImportHook::ImportHookOneMachO(const macho_header *mh, intptr_t slide) { + void **address = GetImportAddress( + mh, + slide + ); + if (address != nullptr) { + address_map_.insert(std::make_pair(address, *address)); + *address = replace_; + } + if (backup_ != nullptr) { + *backup_ = dlsym(RTLD_DEFAULT, symbol_name_); + } +} + +void MachoImportHook::StopHook() { + for (auto &entry : address_map_) { + void **address = entry.first; + void *origin_function = entry.second; + *address = origin_function; + } +} + +void **MachoImportHook::GetImportAddress(const macho_header *mh, intptr_t slide) { + macho_nlist *symbol_table = nullptr; + const char *string_table = nullptr; + u1 *link_edit_base = nullptr; + u4 *indirect_symbol_table = nullptr; + + u4 cmd_count = mh->ncmds; + load_command *first_cmd = OffsetOf(const_cast(mh), sizeof(macho_header)); + load_command *cmd = first_cmd; + for (int i = 0; i < cmd_count; ++i) { + if (cmd->cmd == LC_SEGMENT_COMMAND) { + macho_segment_command *seg = reinterpret_cast(cmd); + if (!strcmp(seg->segname, SEG_LINKEDIT)) { + link_edit_base = reinterpret_cast(seg->vmaddr + slide - seg->fileoff); + break; + } + } + cmd = OffsetOf(cmd, cmd->cmdsize); + } + cmd = first_cmd; + for (int i = 0; i < cmd_count; ++i) { + switch (cmd->cmd) { + case LC_SYMTAB: { + symtab_command *symtab = reinterpret_cast(cmd); + string_table = (char *) &link_edit_base[symtab->stroff]; + symbol_table = (macho_nlist *) (&link_edit_base[symtab->symoff]); + break; + } + case LC_DYSYMTAB: { + dysymtab_command *dsymtab = reinterpret_cast(cmd); + indirect_symbol_table = (u4 *) (&link_edit_base[dsymtab->indirectsymoff]); + break; + } + default: + break; + } + cmd = OffsetOf(cmd, cmd->cmdsize); + } + cmd = first_cmd; 
+ for (int i = 0; i < cmd_count; ++i) { + if (cmd->cmd == LC_SEGMENT_COMMAND) { + macho_segment_command *seg = reinterpret_cast(cmd); + macho_section *sect_start = OffsetOf(seg, sizeof(macho_segment_command)); + macho_section *sect_end = §_start[seg->nsects]; + macho_section *sect; + for (sect = sect_start; sect < sect_end; ++sect) { + int type = sect->flags & SECTION_TYPE; + if (type == S_LAZY_DYLIB_SYMBOL_POINTERS + || type == S_LAZY_SYMBOL_POINTERS + || type == S_NON_LAZY_SYMBOL_POINTERS) { + + size_t ptr_count = sect->size / sizeof(void *); + void **symbol_pointers = reinterpret_cast(sect->addr + slide); + uint32_t indirect_table_offset = sect->reserved1; + for (int lazy_index = 0; lazy_index < ptr_count; lazy_index++) { + uint32_t symbol_index = indirect_symbol_table[indirect_table_offset + lazy_index]; + if (symbol_index != INDIRECT_SYMBOL_ABS && symbol_index != INDIRECT_SYMBOL_LOCAL) { + const char *current_symbol_name = &string_table[symbol_table[symbol_index].n_un.n_strx]; + if (!strcmp(current_symbol_name, symbol_name_)) { + void **result = symbol_pointers + lazy_index; + return result; + } + } + } // end sym foreach + } + + } // end section foreach + } + cmd = OffsetOf(cmd, cmd->cmdsize); + } // end cmd foreach + return nullptr; +} + +} // namespace darwin +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.h b/module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.h new file mode 100644 index 00000000..675d9dfe --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/darwin/macho_import_hook.h @@ -0,0 +1,50 @@ +#ifndef WHALE_DBI_MACHO_IMPORT_HOOK_H_ +#define WHALE_DBI_MACHO_IMPORT_HOOK_H_ + +#include "dbi/hook_common.h" +#include +#include +#include +#include "base/cxx_helper.h" +#include "base/singleton.h" + +#ifndef S_LAZY_DYLIB_SYMBOL_POINTERS +#define S_LAZY_DYLIB_SYMBOL_POINTERS 0x10 +#endif + +#if __LP64__ +#define LC_SEGMENT_COMMAND LC_SEGMENT_64 +typedef mach_header_64 macho_header; +typedef section_64 
macho_section; +typedef nlist_64 macho_nlist; +typedef segment_command_64 macho_segment_command; +#else +#define LC_SEGMENT_COMMAND LC_SEGMENT +typedef mach_header macho_header; +typedef section macho_section; +typedef nlist macho_nlist; +typedef segment_command macho_segment_command; +#endif + +namespace whale { +namespace darwin { + +class MachoImportHook final : public ImportHook { + public: + MachoImportHook(const char *symbol_name, void *replace, void **backup) + : ImportHook(symbol_name, replace, backup) {} + + void StartHook() override; + + void StopHook() override; + + void ImportHookOneMachO(const macho_header *mh, intptr_t slide); + + private: + void **GetImportAddress(const macho_header *mh, intptr_t slide); +}; + +} // namespace darwin +} // namespace whale + +#endif // WHALE_DBI_MACHO_IMPORT_HOOK_H_ diff --git a/module/src/main/cpp/whale/src/dbi/hook_common.cc b/module/src/main/cpp/whale/src/dbi/hook_common.cc new file mode 100644 index 00000000..4d2445be --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/hook_common.cc @@ -0,0 +1,53 @@ +#include "dbi/hook_common.h" +#include "hook_common.h" + +#if defined(linux) + +#include "platform/linux/process_map.h" + +#endif + +namespace whale { + +bool FindStdLibCallback(const char *path, bool *stop) { +#if defined(linux) + if (strstr(path, "system/") && strstr(path, "/libc.so")) { + *stop = true; + return true; + } +#elif defined(__APPLE__) + if (strstr(path, "libc.dylib")) { + *stop = true; + return true; + } +#endif + return false; +} + +InterceptSysCallHook::InterceptSysCallHook(MemoryRangeCallback callback) + : Hook() { + if (callback == nullptr) { + callback = FindStdLibCallback; + } + callback_ = callback; +} + +void InterceptSysCallHook::StartHook() { + bool stop_foreach = false; +#if defined(linux) + ForeachMemoryRange( + [&](uintptr_t begin, uintptr_t end, char *perm, char *mapname) -> bool { + if (strstr(perm, "x") && strstr(perm, "r")) { + if (callback_(mapname, &stop_foreach)) { + 
FindSysCalls(begin, end); + } + } + return stop_foreach; + }); +#endif +} + +void InterceptSysCallHook::StopHook() { + +} +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/hook_common.h b/module/src/main/cpp/whale/src/dbi/hook_common.h new file mode 100644 index 00000000..859bae97 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/hook_common.h @@ -0,0 +1,112 @@ +#ifndef WHALE_ARCH_HOOK_H_ +#define WHALE_ARCH_HOOK_H_ + +#include +#include +#include "dbi/instruction_set.h" + +namespace whale { + +enum class HookType { + kNone, + kInlineHook, + kImportHook, +}; + +class Hook { + public: + int id_{}; + + virtual ~Hook() = default; + + virtual InstructionSet getISA() { + return InstructionSet::kNone; + } + + virtual HookType GetType() { + return HookType::kNone; + } + + virtual void StartHook() = 0; + + virtual void StopHook() = 0; +}; + +class ImportHook : public Hook { + public: + ImportHook(const char *symbol_name, void *replace, void **backup) + : symbol_name_(symbol_name), + replace_(replace), + backup_(backup) {} + + virtual ~ImportHook() override = default; + + HookType GetType() override { + return HookType::kImportHook; + } + + const char *GetSymbolName() { + return symbol_name_; + } + + template + ALWAYS_INLINE T GetReplaceAddress() { + return (T) replace_; + } + + protected: + const char *symbol_name_; + std::map address_map_; + void *replace_; + void **backup_; +}; + +class InlineHook : public Hook { + public: + InlineHook(intptr_t address, intptr_t replace, intptr_t *backup) + : address_(address), + replace_(replace), + backup_(backup) {} + + virtual ~InlineHook() override = default; + + template + ALWAYS_INLINE T GetTarget() { + return (T) address_; + } + + template + ALWAYS_INLINE T GetReplaceAddress() { + return (T) replace_; + } + + HookType GetType() override { + return HookType::kInlineHook; + } + + protected: + intptr_t address_; + intptr_t replace_; + intptr_t *backup_; +}; + +typedef bool (*MemoryRangeCallback)(const char 
*path, bool *stop); + +class InterceptSysCallHook : public Hook { + public: + InterceptSysCallHook(MemoryRangeCallback callback); + + void StartHook(); + + void StopHook(); + + protected: + virtual void FindSysCalls(uintptr_t start, uintptr_t end) = 0; + + MemoryRangeCallback callback_; +}; + +} // namespace whale + +#endif // WHALE_ARCH_HOOK_H_ + diff --git a/module/src/main/cpp/whale/src/dbi/instruction_rewriter.h b/module/src/main/cpp/whale/src/dbi/instruction_rewriter.h new file mode 100644 index 00000000..51c726e6 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/instruction_rewriter.h @@ -0,0 +1,30 @@ +#ifndef ARCH_INSTRUCTION_REWRITER_H_ +#define ARCH_INSTRUCTION_REWRITER_H_ + +#include "assembler/vixl/code-buffer-vixl.h" +#include "dbi/instruction_set.h" + +namespace whale { + +using CodeBuffer = vixl::CodeBuffer; + +template +class InstructionReWriter { + + virtual const InstructionSet GetISA() { + return InstructionSet::kNone; + } + + virtual InsnType *GetStartAddress() = 0; + + virtual size_t GetCodeSize() = 0; + + virtual void Rewrite() = 0; + +}; + +} // namespace whale + +#endif // ARCH_INSTRUCTION_REWRITER_H_ + + diff --git a/module/src/main/cpp/whale/src/dbi/instruction_set.cc b/module/src/main/cpp/whale/src/dbi/instruction_set.cc new file mode 100644 index 00000000..768b9ffc --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/instruction_set.cc @@ -0,0 +1,77 @@ +#include +#include +#include "dbi/instruction_set.h" + +namespace whale { + +std::ostream &operator<<(std::ostream &os, const InstructionSet &rhs) { + return os << GetInstructionSetString(rhs); +} + +void InstructionSetAbort(InstructionSet isa) { + switch (isa) { + case InstructionSet::kArm: + case InstructionSet::kThumb2: + case InstructionSet::kArm64: + case InstructionSet::kX86: + case InstructionSet::kX86_64: + case InstructionSet::kMips: + case InstructionSet::kMips64: + case InstructionSet::kNone: + LOG(FATAL) << "Unsupported instruction set " << isa; + UNREACHABLE(); + } + 
LOG(FATAL) << "Unknown ISA " << isa; + UNREACHABLE(); +} + +const char *GetInstructionSetString(InstructionSet isa) { + switch (isa) { + case InstructionSet::kArm: + case InstructionSet::kThumb2: + return "arm"; + case InstructionSet::kArm64: + return "arm64"; + case InstructionSet::kX86: + return "x86"; + case InstructionSet::kX86_64: + return "x86_64"; + case InstructionSet::kMips: + return "mips"; + case InstructionSet::kMips64: + return "mips64"; + case InstructionSet::kNone: + return "none"; + } + LOG(FATAL) << "Unknown ISA " << isa; + UNREACHABLE(); +} + + +size_t GetInstructionSetAlignment(InstructionSet isa) { + switch (isa) { + case InstructionSet::kArm: + // Fall-through. + case InstructionSet::kThumb2: + return kArmAlignment; + case InstructionSet::kArm64: + return kArm64Alignment; + case InstructionSet::kX86: + // Fall-through. + case InstructionSet::kX86_64: + return kX86Alignment; + case InstructionSet::kMips: + // Fall-through. + case InstructionSet::kMips64: + return kMipsAlignment; + case InstructionSet::kNone: + LOG(FATAL) << "ISA kNone does not have alignment."; + UNREACHABLE(); + } + LOG(FATAL) << "Unknown ISA " << isa; + UNREACHABLE(); +} + + +} // namespace whale + diff --git a/module/src/main/cpp/whale/src/dbi/instruction_set.h b/module/src/main/cpp/whale/src/dbi/instruction_set.h new file mode 100644 index 00000000..9419d768 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/instruction_set.h @@ -0,0 +1,110 @@ +#ifndef WHALE_ARCH_INSTRUCTION_SET_H_ +#define WHALE_ARCH_INSTRUCTION_SET_H_ + +#include "base/macros.h" +#include + +namespace whale { + +enum class InstructionSet { + kNone, + kArm, + kArm64, + kThumb2, + kX86, + kX86_64, + kMips, + kMips64, + kLast = kMips64 +}; + +std::ostream &operator<<(std::ostream &os, const InstructionSet &rhs); + +#if defined(__arm__) +static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm; +#elif defined(__aarch64__) || defined(__arm64__) +static constexpr InstructionSet kRuntimeISA = 
InstructionSet::kArm64; +#elif defined(__mips__) && !defined(__LP64__) +static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips; +#elif defined(__mips__) && defined(__LP64__) +static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips64; +#elif defined(__i386__) +static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86; +#elif defined(__x86_64__) +static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86_64; +#else +static constexpr InstructionSet kRuntimeISA = InstructionSet::kNone; +#endif + +static constexpr size_t kPointerSize = sizeof(void *); + +static constexpr size_t kArmAlignment = 8; + +static constexpr size_t kArm64Alignment = 16; + +static constexpr size_t kMipsAlignment = 8; + +static constexpr size_t kX86Alignment = 16; + +static constexpr size_t kArmInstructionAlignment = 4; +static constexpr size_t kThumb2InstructionAlignment = 2; +static constexpr size_t kArm64InstructionAlignment = 4; +static constexpr size_t kX86InstructionAlignment = 1; +static constexpr size_t kX86_64InstructionAlignment = 1; +static constexpr size_t kMipsInstructionAlignment = 4; +static constexpr size_t kMips64InstructionAlignment = 4; + +const char *GetInstructionSetString(InstructionSet isa); + +// Note: Returns kNone when the string cannot be parsed to a known value. +InstructionSet GetInstructionSetFromString(const char *instruction_set); + +// Fatal logging out of line to keep the header clean of logging.h. 
+NO_RETURN void InstructionSetAbort(InstructionSet isa); + + +constexpr bool Is64BitInstructionSet(InstructionSet isa) { + switch (isa) { + case InstructionSet::kArm: + case InstructionSet::kThumb2: + case InstructionSet::kX86: + case InstructionSet::kMips: + return false; + + case InstructionSet::kArm64: + case InstructionSet::kX86_64: + case InstructionSet::kMips64: + return true; + + case InstructionSet::kNone: + break; + } + InstructionSetAbort(isa); +} + +constexpr size_t GetInstructionSetInstructionAlignment(InstructionSet isa) { + switch (isa) { + case InstructionSet::kArm: + // Fall-through. + case InstructionSet::kThumb2: + return kThumb2InstructionAlignment; + case InstructionSet::kArm64: + return kArm64InstructionAlignment; + case InstructionSet::kX86: + return kX86InstructionAlignment; + case InstructionSet::kX86_64: + return kX86_64InstructionAlignment; + case InstructionSet::kMips: + return kMipsInstructionAlignment; + case InstructionSet::kMips64: + return kMips64InstructionAlignment; + + case InstructionSet::kNone: + break; + } + InstructionSetAbort(isa); +} + +} // namespace whale + +#endif // WHALE_ARCH_INSTRUCTION_SET_H_ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/config.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/config.h new file mode 100644 index 00000000..f67bfe04 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/config.h @@ -0,0 +1,180 @@ +/* +config.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#ifndef CONFIG_H +#define CONFIG_H + +/* diStorm version number. */ +#define __DISTORMV__ 0x030400 + +#include /* memset, memcpy - can be easily self implemented for libc independency. 
*/ + +#include "distorm.h" + + +/* + * 64 bit offsets support: + * This macro should be defined from compiler command line flags, e.g: -DSUPPORT_64BIT_OFFSET + * Note: make sure that the caller (library user) defines it too! + */ +#define SUPPORT_64BIT_OFFSET + +/* + * If you compile diStorm as a dynamic library (.dll or .so) file, make sure you uncomment the next line. + * So the interface functions will be exported, otherwise they are useable only for static library. + * For example, this macro is being set for compiling diStorm as a .dll for Python with CTypes. + */ +/* #define DISTORM_DYNAMIC */ + +/* + * If DISTORM_LIGHT is defined, everything involved in formatting the instructions + * as text will be excluded from compilation. + * distorm_decode(..) and distorm_format(..) will not be available. + * This will decrease the size of the executable and leave you with decomposition functionality only. + * + * Note: it should be either set in the preprocessor definitions manually or in command line -D switch. + */ +//#define DISTORM_LIGHT + +/* + * diStorm now supports little/big endian CPU's. + * It should detect the endianness according to predefined macro's of the compiler. + * If you don't use GCC/MSVC you will have to define it on your own. + */ + +/* These macros are used in order to make the code portable. */ +#ifdef __GNUC__ + +#include + +#define _DLLEXPORT_ +#define _FASTCALL_ +#define _INLINE_ static +/* GCC ignores this directive... 
*/ +/*#define _FASTCALL_ __attribute__((__fastcall__))*/ + +/* Set endianity (supposed to be LE though): */ +#ifdef __BIG_ENDIAN__ + #define BE_SYSTEM +#endif + +/* End of __GCC__ */ + +#elif __WATCOMC__ + +#include + +#define _DLLEXPORT_ +#define _FASTCALL_ +#define _INLINE_ __inline + +/* End of __WATCOMC__ */ + +#elif __DMC__ + +#include + +#define _DLLEXPORT_ +#define _FASTCALL_ +#define _INLINE_ __inline + +/* End of __DMC__ */ + +#elif __TINYC__ + +#include + +#define _DLLEXPORT_ +#define _FASTCALL_ +#define _INLINE_ + +/* End of __TINYC__ */ + +#elif _MSC_VER + +/* stdint alternative is defined in distorm.h */ + +#define _DLLEXPORT_ __declspec(dllexport) +#define _FASTCALL_ __fastcall +#define _INLINE_ __inline + +/* Set endianity (supposed to be LE though): */ +#if !defined(_M_IX86) && !defined(_M_X64) + #define BE_SYSTEM +#endif + +#endif /* #elif _MSC_VER */ + +/* If the library isn't compiled as a dynamic library don't export any functions. */ +#ifndef DISTORM_DYNAMIC +#undef _DLLEXPORT_ +#define _DLLEXPORT_ +#endif + +#ifndef FALSE +#define FALSE 0 +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +/* Define stream read functions for big endian systems. */ +#ifdef BE_SYSTEM + +/* Avoid defining 'static static' for GCC. */ +#ifndef __GNUC__ +#define STATIC_INLINE static _INLINE_ +#else +#define STATIC_INLINE static +#endif + +/* + * Assumption: These functions can read from the stream safely! + * Swap endianity of input to little endian. 
+ */ +STATIC_INLINE int16_t RSHORT(const uint8_t *s) +{ + return s[0] | (s[1] << 8); +} +STATIC_INLINE uint16_t RUSHORT(const uint8_t *s) +{ + return s[0] | (s[1] << 8); +} +STATIC_INLINE int32_t RLONG(const uint8_t *s) +{ + return s[0] | (s[1] << 8) | (s[2] << 16) | (s[3] << 24); +} +STATIC_INLINE uint32_t RULONG(const uint8_t *s) +{ + return s[0] | (s[1] << 8) | (s[2] << 16) | (s[3] << 24); +} +STATIC_INLINE int64_t RLLONG(const uint8_t *s) +{ + return s[0] | (s[1] << 8) | (s[2] << 16) | (s[3] << 24) | ((uint64_t)s[4] << 32) | ((uint64_t)s[5] << 40) | ((uint64_t)s[6] << 48) | ((uint64_t)s[7] << 56); +} +STATIC_INLINE uint64_t RULLONG(const uint8_t *s) +{ + return s[0] | (s[1] << 8) | (s[2] << 16) | (s[3] << 24) | ((uint64_t)s[4] << 32) | ((uint64_t)s[5] << 40) | ((uint64_t)s[6] << 48) | ((uint64_t)s[7] << 56); +} + +#undef STATIC_INLINE + +#else +/* Little endian macro's will just make the cast. */ +#define RSHORT(x) *(int16_t *)x +#define RUSHORT(x) *(uint16_t *)x +#define RLONG(x) *(int32_t *)x +#define RULONG(x) *(uint32_t *)x +#define RLLONG(x) *(int64_t *)x +#define RULLONG(x) *(uint64_t *)x +#endif + +#endif /* CONFIG_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.c new file mode 100644 index 00000000..32f65c7a --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.c @@ -0,0 +1,650 @@ +/* +decoder.c + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. 
+*/ + + +#include "decoder.h" +#include "instructions.h" +#include "insts.h" +#include "prefix.h" +#include "x86defs.h" +#include "operands.h" +#include "insts.h" +#include "mnemonics.h" + + +/* Instruction Prefixes - Opcode - ModR/M - SIB - Displacement - Immediate */ + +static _DecodeType decode_get_effective_addr_size(_DecodeType dt, _iflags decodedPrefixes) +{ + /* + * This table is to map from the current decoding mode to an effective address size: + * Decode16 -> Decode32 + * Decode32 -> Decode16 + * Decode64 -> Decode32 + */ + static _DecodeType AddrSizeTable[] = {Decode32Bits, Decode16Bits, Decode32Bits}; + + /* Switch to non default mode if prefix exists, only for ADDRESS SIZE. */ + if (decodedPrefixes & INST_PRE_ADDR_SIZE) dt = AddrSizeTable[dt]; + return dt; +} + +static _DecodeType decode_get_effective_op_size(_DecodeType dt, _iflags decodedPrefixes, unsigned int rex, _iflags instFlags) +{ + /* + * This table is to map from the current decoding mode to an effective operand size: + * Decode16 -> Decode32 + * Decode32 -> Decode16 + * Decode64 -> Decode16 + * Not that in 64bits it's a bit more complicated, because of REX and promoted instructions. + */ + static _DecodeType OpSizeTable[] = {Decode32Bits, Decode16Bits, Decode16Bits}; + + if (decodedPrefixes & INST_PRE_OP_SIZE) return OpSizeTable[dt]; + + if (dt == Decode64Bits) { + /* + * REX Prefix toggles data size to 64 bits. + * Operand size prefix toggles data size to 16. + * Default data size is 32 bits. + * Promoted instructions are 64 bits if they don't require a REX perfix. + * Non promoted instructions are 64 bits if the REX prefix exists. + */ + /* Automatically promoted instructions have only INST_64BITS SET! */ + if (((instFlags & (INST_64BITS | INST_PRE_REX)) == INST_64BITS) || + /* Other instructions in 64 bits can be promoted only with a REX prefix. */ + ((decodedPrefixes & INST_PRE_REX) && (rex & PREFIX_EX_W))) dt = Decode64Bits; + else dt = Decode32Bits; /* Default. 
*/ + } + return dt; +} + +/* A helper macro to convert from diStorm's CPU flags to EFLAGS. */ +#define CONVERT_FLAGS_TO_EFLAGS(dst, src, field) dst->field = ((src->field & D_COMPACT_SAME_FLAGS) | \ + ((src->field & D_COMPACT_IF) ? D_IF : 0) | \ + ((src->field & D_COMPACT_DF) ? D_DF : 0) | \ + ((src->field & D_COMPACT_OF) ? D_OF : 0)); + +static _DecodeResult decode_inst(_CodeInfo* ci, _PrefixState* ps, _DInst* di) +{ + /* Remember whether the instruction is privileged. */ + uint16_t privilegedFlag = 0; + + /* The ModR/M byte of the current instruction. */ + unsigned int modrm = 0; + + /* The REX/VEX prefix byte value. */ + unsigned int vrex = ps->vrex; + + /* + * Backup original input, so we can use it later if a problem occurs + * (like not enough data for decoding, invalid opcode, etc). + */ + const uint8_t* startCode = ci->code; + + /* Holds the info about the current found instruction. */ + _InstInfo* ii = NULL; + _InstInfo iip; /* Privileged instruction cache. */ + _InstSharedInfo* isi = NULL; + + /* Used only for special CMP instructions which have pseudo opcodes suffix. */ + unsigned char cmpType = 0; + + /* + * Indicates whether it is right to LOCK the instruction by decoding its first operand. + * Only then you know if it's ok to output the LOCK prefix's text... + * Used for first operand only. + */ + int lockable = FALSE; + + /* Calculate (and cache) effective-operand-size and effective-address-size only once. */ + _DecodeType effOpSz, effAdrSz; + _iflags instFlags; + + ii = inst_lookup(ci, ps); + if (ii == NULL) goto _Undecodable; + isi = &InstSharedInfoTable[ii->sharedIndex]; + instFlags = FlagsTable[isi->flagsIndex]; + privilegedFlag = ii->opcodeId & OPCODE_ID_PRIVILEGED; + + if (privilegedFlag) { + /* + * Copy the privileged instruction info so we can remove the privileged bit + * from the opcodeId field. This makes sure we're not modifying the tables + * in case we lookup this privileged instruction later. 
+ */ + iip = *ii; + iip.opcodeId &= ~OPCODE_ID_PRIVILEGED; + ii = &iip; + } + + /* + * If both REX and OpSize are available we will have to disable the OpSize, because REX has precedence. + * However, only if REX.W is set ! + * We had to wait with this test, since the operand size may be a mandatory prefix, + * and we know it only after prefetching. + */ + if ((ps->prefixExtType == PET_REX) && + (ps->decodedPrefixes & INST_PRE_OP_SIZE) && + (!ps->isOpSizeMandatory) && + (vrex & PREFIX_EX_W)) { + ps->decodedPrefixes &= ~INST_PRE_OP_SIZE; + prefixes_ignore(ps, PFXIDX_OP_SIZE); + } + + /* + * In this point we know the instruction we are about to decode and its operands (unless, it's an invalid one!), + * so it makes it the right time for decoding-type suitability testing. + * Which practically means, don't allow 32 bits instructions in 16 bits decoding mode, but do allow + * 16 bits instructions in 32 bits decoding mode, of course... + + * NOTE: Make sure the instruction set for 32 bits has explicitly this specific flag set. + * NOTE2: Make sure the instruction set for 64 bits has explicitly this specific flag set. + + * If this is the case, drop what we've got and restart all over after DB'ing that byte. + + * Though, don't drop an instruction which is also supported in 16 and 32 bits. + */ + + /* ! ! ! DISABLED UNTIL FURTHER NOTICE ! ! ! Decode16Bits CAN NOW DECODE 32 BITS INSTRUCTIONS ! ! !*/ + /* if (ii && (dt == Decode16Bits) && (instFlags & INST_32BITS) && (~instFlags & INST_16BITS)) ii = NULL; */ + + /* Drop instructions which are invalid in 64 bits. */ + if ((ci->dt == Decode64Bits) && (instFlags & INST_INVALID_64BITS)) goto _Undecodable; + + /* If it's only a 64 bits instruction drop it in other decoding modes. */ + if ((ci->dt != Decode64Bits) && (instFlags & INST_64BITS_FETCH)) goto _Undecodable; + + if (instFlags & INST_MODRM_REQUIRED) { + /* If the ModRM byte is not part of the opcode, skip the last byte code, so code points now to ModRM. 
*/ + if (~instFlags & INST_MODRM_INCLUDED) { + ci->code++; + if (--ci->codeLen < 0) goto _Undecodable; + } + modrm = *ci->code; + + /* Some instructions enforce that reg=000, so validate that. (Specifically EXTRQ). */ + if ((instFlags & INST_FORCE_REG0) && (((modrm >> 3) & 7) != 0)) goto _Undecodable; + /* Some instructions enforce that mod=11, so validate that. */ + if ((instFlags & INST_MODRR_REQUIRED) && (modrm < INST_DIVIDED_MODRM)) goto _Undecodable; + } + + ci->code++; /* Skip the last byte we just read (either last opcode's byte code or a ModRM). */ + + /* Cache the effective operand-size and address-size. */ + effOpSz = decode_get_effective_op_size(ci->dt, ps->decodedPrefixes, vrex, instFlags); + effAdrSz = decode_get_effective_addr_size(ci->dt, ps->decodedPrefixes); + + memset(di, 0, sizeof(_DInst)); + di->base = R_NONE; + + /* + * Try to extract the next operand only if the latter exists. + * For example, if there is not first operand, no reason to try to extract second operand... + * I decided that a for-break is better for readability in this specific case than goto. + * Note: do-while with a constant 0 makes the compiler warning about it. + */ + for (;;) { + if (isi->d != OT_NONE) { + if (!operands_extract(ci, di, ii, instFlags, (_OpType)isi->d, ONT_1, modrm, ps, effOpSz, effAdrSz, &lockable)) goto _Undecodable; + } else break; + + if (isi->s != OT_NONE) { + if (!operands_extract(ci, di, ii, instFlags, (_OpType)isi->s, ONT_2, modrm, ps, effOpSz, effAdrSz, NULL)) goto _Undecodable; + } else break; + + /* Use third operand, only if the flags says this InstInfo requires it. */ + if (instFlags & INST_USE_OP3) { + if (!operands_extract(ci, di, ii, instFlags, (_OpType)((_InstInfoEx*)ii)->op3, ONT_3, modrm, ps, effOpSz, effAdrSz, NULL)) goto _Undecodable; + } else break; + + /* Support for a fourth operand is added for (i.e:) INSERTQ instruction. 
*/ + if (instFlags & INST_USE_OP4) { + if (!operands_extract(ci, di, ii, instFlags, (_OpType)((_InstInfoEx*)ii)->op4, ONT_4, modrm, ps, effOpSz, effAdrSz, NULL)) goto _Undecodable; + } + break; + } /* Continue here after all operands were extracted. */ + + /* If it were a 3DNow! instruction, we will have to find the instruction itself now that we got its operands extracted. */ + if (instFlags & INST_3DNOW_FETCH) { + ii = inst_lookup_3dnow(ci); + if (ii == NULL) goto _Undecodable; + isi = &InstSharedInfoTable[ii->sharedIndex]; + instFlags = FlagsTable[isi->flagsIndex]; + } + + /* Check whether pseudo opcode is needed, only for CMP instructions: */ + if (instFlags & INST_PSEUDO_OPCODE) { + if (--ci->codeLen < 0) goto _Undecodable; + cmpType = *ci->code; + ci->code++; + if (instFlags & INST_PRE_VEX) { + /* AVX Comparison type must be between 0 to 32, otherwise Reserved. */ + if (cmpType >= INST_VCMP_MAX_RANGE) goto _Undecodable; + } else { + /* SSE Comparison type must be between 0 to 8, otherwise Reserved. */ + if (cmpType >= INST_CMP_MAX_RANGE) goto _Undecodable; + } + } + + /* + * There's a limit of 15 bytes on instruction length. The only way to violate + * this limit is by putting redundant prefixes before an instruction. + * start points to first prefix if any, otherwise it points to instruction first byte. + */ + if ((ci->code - ps->start) > INST_MAXIMUM_SIZE) goto _Undecodable; /* Drop instruction. */ + + /* + * If we reached here the instruction was fully decoded, we located the instruction in the DB and extracted operands. + * Use the correct mnemonic according to the DT. + * If we are in 32 bits decoding mode it doesn't necessarily mean we will choose mnemonic2, alas, + * it means that if there is a mnemonic2, it will be used. + */ + + /* Start with prefix LOCK. 
*/ + if ((lockable == TRUE) && (instFlags & INST_PRE_LOCK)) { + ps->usedPrefixes |= INST_PRE_LOCK; + di->flags |= FLAG_LOCK; + } else if ((instFlags & INST_PRE_REPNZ) && (ps->decodedPrefixes & INST_PRE_REPNZ)) { + ps->usedPrefixes |= INST_PRE_REPNZ; + di->flags |= FLAG_REPNZ; + } else if ((instFlags & INST_PRE_REP) && (ps->decodedPrefixes & INST_PRE_REP)) { + ps->usedPrefixes |= INST_PRE_REP; + di->flags |= FLAG_REP; + } + + /* If it's JeCXZ the ADDR_SIZE prefix affects them. */ + if ((instFlags & (INST_PRE_ADDR_SIZE | INST_USE_EXMNEMONIC)) == (INST_PRE_ADDR_SIZE | INST_USE_EXMNEMONIC)) { + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + if (effAdrSz == Decode16Bits) di->opcode = ii->opcodeId; + else if (effAdrSz == Decode32Bits) di->opcode = ((_InstInfoEx*)ii)->opcodeId2; + /* Ignore REX.W in 64bits, JECXZ is promoted. */ + else /* Decode64Bits */ di->opcode = ((_InstInfoEx*)ii)->opcodeId3; + } + + /* LOOPxx instructions are also native instruction, but they are special case ones, ADDR_SIZE prefix affects them. */ + else if ((instFlags & (INST_PRE_ADDR_SIZE | INST_NATIVE)) == (INST_PRE_ADDR_SIZE | INST_NATIVE)) { + di->opcode = ii->opcodeId; + + /* If LOOPxx gets here from 64bits, it must be Decode32Bits because Address Size prefix is set. */ + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + } + /* + * Note: + * If the instruction is prefixed by operand size we will format it in the non-default decoding mode! + * So there might be a situation that an instruction of 32 bit gets formatted in 16 bits decoding mode. + * Both ways should end up with a correct and expected formatting of the text. + */ + else if (effOpSz == Decode16Bits) { /* Decode16Bits */ + + /* Set operand size. */ + FLAG_SET_OPSIZE(di, Decode16Bits); + + /* + * If it's a special instruction which has two mnemonics, then use the 16 bits one + update usedPrefixes. + * Note: use 16 bits mnemonic if that instruction supports 32 bit or 64 bit explicitly. 
+ */ + if ((instFlags & INST_USE_EXMNEMONIC) && ((instFlags & (INST_32BITS | INST_64BITS)) == 0)) ps->usedPrefixes |= INST_PRE_OP_SIZE; + di->opcode = ii->opcodeId; + } else if (effOpSz == Decode32Bits) { /* Decode32Bits */ + + /* Set operand size. */ + FLAG_SET_OPSIZE(di, Decode32Bits); + + /* Give a chance for special mnemonic instruction in 32 bits decoding. */ + if (instFlags & INST_USE_EXMNEMONIC) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + /* Is it a special instruction which has another mnemonic for mod=11 ? */ + if (instFlags & INST_MNEMONIC_MODRM_BASED) { + if (modrm >= INST_DIVIDED_MODRM) di->opcode = ii->opcodeId; + else di->opcode = ((_InstInfoEx*)ii)->opcodeId2; + } else di->opcode = ((_InstInfoEx*)ii)->opcodeId2; + } else di->opcode = ii->opcodeId; + } else { /* Decode64Bits, note that some instructions might be decoded in Decode32Bits above. */ + + /* Set operand size. */ + FLAG_SET_OPSIZE(di, Decode64Bits); + + if (instFlags & (INST_USE_EXMNEMONIC | INST_USE_EXMNEMONIC2)) { + /* + * We shouldn't be here for MODRM based mnemonics with a MOD=11, + * because they must not use REX (otherwise it will get to the wrong instruction which share same opcode). + * See XRSTOR and XSAVEOPT. + */ + if ((instFlags & INST_MNEMONIC_MODRM_BASED) && (modrm >= INST_DIVIDED_MODRM)) goto _Undecodable; + + /* Use third mnemonic, for 64 bits. */ + if ((instFlags & INST_USE_EXMNEMONIC2) && (vrex & PREFIX_EX_W)) { + ps->usedPrefixes |= INST_PRE_REX; + di->opcode = ((_InstInfoEx*)ii)->opcodeId3; + } else di->opcode = ((_InstInfoEx*)ii)->opcodeId2; /* Use second mnemonic. */ + } else di->opcode = ii->opcodeId; + } + + /* If it's a native instruction use OpSize Prefix. 
*/ + if ((instFlags & INST_NATIVE) && (ps->decodedPrefixes & INST_PRE_OP_SIZE)) ps->usedPrefixes |= INST_PRE_OP_SIZE; + + /* Check VEX mnemonics: */ + if ((instFlags & INST_PRE_VEX) && + (((((_InstInfoEx*)ii)->flagsEx & INST_MNEMONIC_VEXW_BASED) && (vrex & PREFIX_EX_W)) || + ((((_InstInfoEx*)ii)->flagsEx & INST_MNEMONIC_VEXL_BASED) && (vrex & PREFIX_EX_L)))) { + di->opcode = ((_InstInfoEx*)ii)->opcodeId2; + } + + /* Or is it a special CMP instruction which needs a pseudo opcode suffix ? */ + if (instFlags & INST_PSEUDO_OPCODE) { + /* + * The opcodeId is the offset to the FIRST pseudo compare mnemonic, + * we will have to fix it so it offsets into the corrected mnemonic. + * Therefore, we use another table to fix the offset. + */ + if (instFlags & INST_PRE_VEX) { + /* Use the AVX pseudo compare mnemonics table. */ + di->opcode = ii->opcodeId + VCmpMnemonicOffsets[cmpType]; + } else { + /* Use the SSE pseudo compare mnemonics table. */ + di->opcode = ii->opcodeId + CmpMnemonicOffsets[cmpType]; + } + } + + /* + * Store the address size inside the flags. + * This is necessary for the caller to know the size of rSP when using PUSHA for example. + */ + FLAG_SET_ADDRSIZE(di, effAdrSz); + + /* Copy DST_WR flag. */ + if (instFlags & INST_DST_WR) di->flags |= FLAG_DST_WR; + + /* Set the unused prefixes mask. */ + di->unusedPrefixesMask = prefixes_set_unused_mask(ps); + + /* Fix privileged. Assumes the privilegedFlag is 0x8000 only. */ + di->flags |= privilegedFlag; + + /* Copy instruction meta. */ + di->meta = isi->meta; + if (di->segment == 0) di->segment = R_NONE; + + /* Take into account the O_MEM base register for the mask. */ + if (di->base != R_NONE) di->usedRegistersMask |= _REGISTERTORCLASS[di->base]; + + /* Copy CPU affected flags. */ + CONVERT_FLAGS_TO_EFLAGS(di, isi, modifiedFlagsMask); + CONVERT_FLAGS_TO_EFLAGS(di, isi, testedFlagsMask); + CONVERT_FLAGS_TO_EFLAGS(di, isi, undefinedFlagsMask); + + /* Calculate the size of the instruction we've just decoded. 
*/ + di->size = (uint8_t)((ci->code - startCode) & 0xff); + return DECRES_SUCCESS; + +_Undecodable: /* If the instruction couldn't be decoded for some reason, drop the first byte. */ + memset(di, 0, sizeof(_DInst)); + di->base = R_NONE; + + di->size = 1; + /* Clean prefixes just in case... */ + ps->usedPrefixes = 0; + + /* Special case for WAIT instruction: If it's dropped, you have to return a valid instruction! */ + if (*startCode == INST_WAIT_INDEX) { + di->opcode = I_WAIT; + META_SET_ISC(di, ISC_INTEGER); + return DECRES_SUCCESS; + } + + /* Mark that we didn't manage to decode the instruction well, caller will drop it. */ + return DECRES_INPUTERR; +} + +/* + * decode_internal + * + * supportOldIntr - Since now we work with new structure instead of the old _DecodedInst, we are still interested in backward compatibility. + * So although, the array is now of type _DInst, we want to read it in jumps of the old array element's size. + * This is in order to save memory allocation for conversion between the new and the old structures. + * It really means we can do the conversion in-place now. + */ +_DecodeResult decode_internal(_CodeInfo* _ci, int supportOldIntr, _DInst result[], unsigned int maxResultCount, unsigned int* usedInstructionsCount) +{ + _PrefixState ps; + unsigned int prefixSize; + _CodeInfo ci; + unsigned int features; + unsigned int mfc; + + _OffsetType codeOffset = _ci->codeOffset; + const uint8_t* code = _ci->code; + int codeLen = _ci->codeLen; + + /* + * This is used for printing only, it is the real offset of where the whole instruction begins. + * We need this variable in addition to codeOffset, because prefixes might change the real offset an instruction begins at. + * So we keep track of both. + */ + _OffsetType startInstOffset = 0; + + const uint8_t* p; + + /* Current working decoded instruction in results. 
*/ + unsigned int nextPos = 0; + _DInst *pdi = NULL; + + _OffsetType addrMask = (_OffsetType)-1; + + _DecodeResult decodeResult; + +#ifdef DISTORM_LIGHT + supportOldIntr; /* Unreferenced. */ + + /* + * Only truncate address if we are using the decompose interface. + * Otherwise, we use the textual interface which needs full addresses for formatting bytes output. + * So distorm_format will truncate later. + */ + if (_ci->features & DF_MAXIMUM_ADDR32) addrMask = 0xffffffff; + else if (_ci->features & DF_MAXIMUM_ADDR16) addrMask = 0xffff; +#endif + + /* No entries are used yet. */ + *usedInstructionsCount = 0; + ci.dt = _ci->dt; + _ci->nextOffset = codeOffset; + + /* Decode instructions as long as we have what to decode/enough room in entries. */ + while (codeLen > 0) { + + /* startInstOffset holds the displayed offset of current instruction. */ + startInstOffset = codeOffset; + + memset(&ps, 0, (size_t)((char*)&ps.pfxIndexer[0] - (char*)&ps)); + memset(ps.pfxIndexer, PFXIDX_NONE, sizeof(int) * PFXIDX_MAX); + ps.start = code; + ps.last = code; + prefixSize = 0; + + if (prefixes_is_valid(*code, ci.dt)) { + prefixes_decode(code, codeLen, &ps, ci.dt); + /* Count prefixes, start points to first prefix. */ + prefixSize = (unsigned int)(ps.last - ps.start); + /* + * It might be that we will just notice that we ran out of bytes, or only prefixes + * so we will have to drop everything and halt. + * Also take into consideration of flow control instruction filter. + */ + codeLen -= prefixSize; + if ((codeLen == 0) || (prefixSize == INST_MAXIMUM_SIZE)) { + if (~_ci->features & DF_RETURN_FC_ONLY) { + /* Make sure there is enough room. */ + if (nextPos + (ps.last - code) > maxResultCount) return DECRES_MEMORYERR; + + for (p = code; p < ps.last; p++, startInstOffset++) { + /* Use next entry. 
*/ +#ifndef DISTORM_LIGHT + if (supportOldIntr) { + pdi = (_DInst*)((char*)result + nextPos * sizeof(_DecodedInst)); + } + else +#endif /* DISTORM_LIGHT */ + { + pdi = &result[nextPos]; + } + nextPos++; + memset(pdi, 0, sizeof(_DInst)); + + pdi->flags = FLAG_NOT_DECODABLE; + pdi->imm.byte = *p; + pdi->size = 1; + pdi->addr = startInstOffset & addrMask; + } + *usedInstructionsCount = nextPos; /* Include them all. */ + } + if (codeLen == 0) break; /* Bye bye, out of bytes. */ + } + code += prefixSize; + codeOffset += prefixSize; + + /* If we got only prefixes continue to next instruction. */ + if (prefixSize == INST_MAXIMUM_SIZE) continue; + } + + /* + * Now we decode the instruction and only then we do further prefixes handling. + * This is because the instruction could not be decoded at all, or an instruction requires + * a mandatory prefix, or some of the prefixes were useless, etc... + + * Even if there were a mandatory prefix, we already took into account its size as a normal prefix. + * so prefixSize includes that, and the returned size in pdi is simply the size of the real(=without prefixes) instruction. + */ + if (ci.dt == Decode64Bits) { + if (ps.decodedPrefixes & INST_PRE_REX) { + /* REX prefix must precede first byte of instruction. */ + if (ps.rexPos != (code - 1)) { + ps.decodedPrefixes &= ~INST_PRE_REX; + ps.prefixExtType = PET_NONE; + prefixes_ignore(&ps, PFXIDX_REX); + } + /* + * We will disable operand size prefix, + * if it exists only after decoding the instruction, since it might be a mandatory prefix. + * This will be done after calling inst_lookup in decode_inst. + */ + } + /* In 64 bits, segment overrides of CS, DS, ES and SS are ignored. So don't take'em into account. */ + if (ps.decodedPrefixes & INST_PRE_SEGOVRD_MASK32) { + ps.decodedPrefixes &= ~INST_PRE_SEGOVRD_MASK32; + prefixes_ignore(&ps, PFXIDX_SEG); + } + } + + /* Make sure there is at least one more entry to use, for the upcoming instruction. 
*/ + if (nextPos + 1 > maxResultCount) return DECRES_MEMORYERR; +#ifndef DISTORM_LIGHT + if (supportOldIntr) { + pdi = (_DInst*)((char*)result + nextPos * sizeof(_DecodedInst)); + } + else +#endif /* DISTORM_LIGHT */ + { + pdi = &result[nextPos]; + } + nextPos++; + + /* + * The reason we copy these two again is because we have to keep track on the input ourselves. + * There might be a case when an instruction is invalid, and then it will be counted as one byte only. + * But that instruction already read a byte or two from the stream and only then returned the error. + * Thus, we end up unsynchronized on the stream. + * This way, we are totally safe, because we keep track after the call to decode_inst, using the returned size. + */ + ci.code = code; + ci.codeLen = codeLen; + /* Nobody uses codeOffset in the decoder itself, so spare it. */ + + decodeResult = decode_inst(&ci, &ps, pdi); + + /* See if we need to filter this instruction. */ + if ((_ci->features & DF_RETURN_FC_ONLY) && (META_GET_FC(pdi->meta) == FC_NONE)) decodeResult = DECRES_FILTERED; + + /* Set address to the beginning of the instruction. */ + pdi->addr = startInstOffset & addrMask; + /* pdi->disp &= addrMask; */ + + if ((decodeResult == DECRES_INPUTERR) && (ps.decodedPrefixes & INST_PRE_VEX)) { + if (ps.prefixExtType == PET_VEX3BYTES) { + prefixSize -= 2; + codeLen += 2; + } else if (ps.prefixExtType == PET_VEX2BYTES) { + prefixSize -= 1; + codeLen += 1; + } + ps.last = ps.start + prefixSize - 1; + code = ps.last + 1; + codeOffset = startInstOffset + prefixSize; + } else { + /* Advance to next instruction. */ + codeLen -= pdi->size; + codeOffset += pdi->size; + code += pdi->size; + + /* Instruction's size should include prefixes. */ + pdi->size += (uint8_t)prefixSize; + } + + /* Drop all prefixes and the instruction itself, because the instruction wasn't successfully decoded. */ + if ((decodeResult == DECRES_INPUTERR) && (~_ci->features & DF_RETURN_FC_ONLY)) { + nextPos--; /* Undo last result. 
*/ + if ((prefixSize + 1) > 0) { /* 1 for the first instruction's byte. */ + if ((nextPos + prefixSize + 1) > maxResultCount) return DECRES_MEMORYERR; + + for (p = ps.start; p < ps.last + 1; p++, startInstOffset++) { + /* Use next entry. */ +#ifndef DISTORM_LIGHT + if (supportOldIntr) { + pdi = (_DInst*)((char*)result + nextPos * sizeof(_DecodedInst)); + } + else +#endif /* DISTORM_LIGHT */ + { + pdi = &result[nextPos]; + } + nextPos++; + + memset(pdi, 0, sizeof(_DInst)); + pdi->flags = FLAG_NOT_DECODABLE; + pdi->imm.byte = *p; + pdi->size = 1; + pdi->addr = startInstOffset & addrMask; + } + } + } else if (decodeResult == DECRES_FILTERED) nextPos--; /* Return it to pool, since it was filtered. */ + + /* Alright, the caller can read, at least, up to this one. */ + *usedInstructionsCount = nextPos; + /* Fix next offset. */ + _ci->nextOffset = codeOffset; + + /* Check whether we need to stop on any flow control instruction. */ + features = _ci->features; + mfc = META_GET_FC(pdi->meta); + if ((decodeResult == DECRES_SUCCESS) && (features & DF_STOP_ON_FLOW_CONTROL)) { + if (((features & DF_STOP_ON_CALL) && (mfc == FC_CALL)) || + ((features & DF_STOP_ON_RET) && (mfc == FC_RET)) || + ((features & DF_STOP_ON_SYS) && (mfc == FC_SYS)) || + ((features & DF_STOP_ON_UNC_BRANCH) && (mfc == FC_UNC_BRANCH)) || + ((features & DF_STOP_ON_CND_BRANCH) && (mfc == FC_CND_BRANCH)) || + ((features & DF_STOP_ON_INT) && (mfc == FC_INT)) || + ((features & DF_STOP_ON_CMOV) && (mfc == FC_CMOV))) + return DECRES_SUCCESS; + } + } + + return DECRES_SUCCESS; +} diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.h new file mode 100644 index 00000000..2f9961a5 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/decoder.h @@ -0,0 +1,33 @@ +/* +decoder.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2011 Gil Dabah + +This program is free 
software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>
+*/
+
+
+#ifndef DECODER_H
+#define DECODER_H
+
+#include "config.h"
+
+typedef unsigned int _iflags;
+
+_DecodeResult decode_internal(_CodeInfo* ci, int supportOldIntr, _DInst result[], unsigned int maxResultCount, unsigned int* usedInstructionsCount);
+
+#endif /* DECODER_H */
diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.c
new file mode 100644
index 00000000..18c60a20
--- /dev/null
+++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.c
@@ -0,0 +1,456 @@
+/*
+distorm.c
+
+diStorm3 C Library Interface
+diStorm3 - Powerful disassembler for X86/AMD64
+http://ragestorm.net/distorm/
+distorm at gmail dot com
+Copyright (C) 2003-2018 Gil Dabah
+This library is licensed under the BSD license. See the file COPYING.
+*/ + + +#include "distorm.h" +#include "config.h" +#include "decoder.h" +#include "x86defs.h" +#include "textdefs.h" +#include "wstring.h" +#include "mnemonics.h" + +/* C DLL EXPORTS */ +#ifdef SUPPORT_64BIT_OFFSET +_DLLEXPORT_ _DecodeResult +distorm_decompose64(_CodeInfo *ci, _DInst result[], unsigned int maxInstructions, + unsigned int *usedInstructionsCount) +#else +_DLLEXPORT_ _DecodeResult distorm_decompose32(_CodeInfo* ci, _DInst result[], unsigned int maxInstructions, unsigned int* usedInstructionsCount) +#endif +{ + if (usedInstructionsCount == NULL) { + return DECRES_SUCCESS; + } + + /* DECRES_SUCCESS still may indicate we may have something in the result, so zero it first thing. */ + *usedInstructionsCount = 0; + + if ((ci == NULL) || + (ci->codeLen < 0) || + ((ci->dt != Decode16Bits) && (ci->dt != Decode32Bits) && (ci->dt != Decode64Bits)) || + (ci->code == NULL) || + (result == NULL) || + ((ci->features & (DF_MAXIMUM_ADDR16 | DF_MAXIMUM_ADDR32)) == + (DF_MAXIMUM_ADDR16 | DF_MAXIMUM_ADDR32))) { + return DECRES_INPUTERR; + } + + /* Assume length=0 is success. */ + if (ci->codeLen == 0) { + return DECRES_SUCCESS; + } + + return decode_internal(ci, FALSE, result, maxInstructions, usedInstructionsCount); +} + +#ifndef DISTORM_LIGHT + +/* Helper function to concatenate an explicit size when it's unknown from the operands. */ +static void distorm_format_size(_WString *str, const _DInst *di, int opNum) { + int isSizingRequired = 0; + /* + * We only have to output the size explicitly if it's not clear from the operands. + * For example: + * mov al, [0x1234] -> The size is 8, we know it from the AL register operand. + * mov [0x1234], 0x11 -> Now we don't know the size. Pam pam pam + * + * If given operand number is higher than 2, then output the size anyways. + */ + isSizingRequired = ((opNum >= 2) || ((di->ops[0].type != O_REG) && (di->ops[1].type != O_REG))); + + /* Still not sure? Try some special instructions. 
*/ + if (!isSizingRequired) { + /* + * INS/OUTS are exception, because DX is a port specifier and not a real src/dst register. + * A few exceptions that always requires sizing: + * MOVZX, MOVSX, MOVSXD. + * ROL, ROR, RCL, RCR, SHL, SHR, SAL, SAR. + * SHLD, SHRD. + */ + switch (di->opcode) { + case I_INS: + case I_OUTS: + case I_MOVZX: + case I_MOVSX: + case I_MOVSXD: + case I_ROL: + case I_ROR: + case I_RCL: + case I_RCR: + case I_SHL: + case I_SHR: + case I_SAL: + case I_SAR: + case I_SHLD: + case I_SHRD: + isSizingRequired = 1; + break; + default: /* Instruction doesn't require sizing. */ break; + } + } + + if (isSizingRequired) { + switch (di->ops[opNum].size) { + case 0: + break; /* OT_MEM's unknown size. */ + case 8: + strcat_WSN(str, "BYTE "); + break; + case 16: + strcat_WSN(str, "WORD "); + break; + case 32: + strcat_WSN(str, "DWORD "); + break; + case 64: + strcat_WSN(str, "QWORD "); + break; + case 80: + strcat_WSN(str, "TBYTE "); + break; + case 128: + strcat_WSN(str, "DQWORD "); + break; + case 256: + strcat_WSN(str, "YWORD "); + break; + default: /* Big oh uh if it gets here. */ break; + } + } +} + +static void distorm_format_signed_disp(_WString *str, const _DInst *di, uint64_t addrMask) { + int64_t tmpDisp64; + + if (di->dispSize) { + chrcat_WS(str, ((int64_t) di->disp < 0) ? MINUS_DISP_CHR : PLUS_DISP_CHR); + if ((int64_t) di->disp < 0) tmpDisp64 = -(int64_t) di->disp; + else tmpDisp64 = di->disp; + tmpDisp64 &= addrMask; + str_code_hqw(str, (uint8_t *) &tmpDisp64); + } +} + +#ifdef SUPPORT_64BIT_OFFSET +_DLLEXPORT_ void distorm_format64(const _CodeInfo *ci, const _DInst *di, _DecodedInst *result) +#else +_DLLEXPORT_ void distorm_format32(const _CodeInfo* ci, const _DInst* di, _DecodedInst* result) +#endif +{ + _WString *str; + unsigned int i, isDefault; + int64_t tmpDisp64; + uint64_t addrMask = (uint64_t) -1; + uint8_t segment; + const _WMnemonic *mnemonic; + + /* Set address mask, when default is for 64bits addresses. 
*/ + if (ci->features & DF_MAXIMUM_ADDR32) addrMask = 0xffffffff; + else if (ci->features & DF_MAXIMUM_ADDR16) addrMask = 0xffff; + + /* Copy other fields. */ + result->size = di->size; + result->offset = di->addr; + + if (di->flags == FLAG_NOT_DECODABLE) { + str = &result->mnemonic; + result->offset &= addrMask; + strclear_WS(&result->operands); + strcpy_WSN(str, "DB "); + str_code_hb(str, di->imm.byte); + strclear_WS(&result->instructionHex); + str_hex_b(&result->instructionHex, di->imm.byte); + return; /* Skip to next instruction. */ + } + + str = &result->instructionHex; + strclear_WS(str); + /* Gotta have full address for (di->addr - ci->codeOffset) to work in all modes. */ + for (i = 0; i < di->size; i++) + str_hex_b(str, ci->code[(unsigned int) (di->addr - ci->codeOffset + i)]); + + /* Truncate address now. */ + result->offset &= addrMask; + + str = &result->mnemonic; + switch (FLAG_GET_PREFIX(di->flags)) { + case FLAG_LOCK: + strcpy_WSN(str, "LOCK "); + break; + case FLAG_REP: + /* REP prefix for CMPS and SCAS is really a REPZ. */ + if ((di->opcode == I_CMPS) || (di->opcode == I_SCAS)) strcpy_WSN(str, "REPZ "); + else + strcpy_WSN(str, "REP "); + break; + case FLAG_REPNZ: + strcpy_WSN(str, "REPNZ "); + break; + default: + /* Init mnemonic string, cause next touch is concatenation. */ + strclear_WS(str); + break; + } + + mnemonic = (const _WMnemonic *) &_MNEMONICS[di->opcode]; + memcpy((int8_t *) &str->p[str->length], mnemonic->p, mnemonic->length + 1); + str->length += mnemonic->length; + + /* Format operands: */ + str = &result->operands; + strclear_WS(str); + + /* Special treatment for String instructions. 
*/ + if ((META_GET_ISC(di->meta) == ISC_INTEGER) && + ((di->opcode == I_MOVS) || + (di->opcode == I_CMPS) || + (di->opcode == I_STOS) || + (di->opcode == I_LODS) || + (di->opcode == I_SCAS))) { + /* + * No operands are needed if the address size is the default one, + * and no segment is overridden, so add the suffix letter, + * to indicate size of operation and continue to next instruction. + */ + if ((FLAG_GET_ADDRSIZE(di->flags) == ci->dt) && (SEGMENT_IS_DEFAULT(di->segment))) { + str = &result->mnemonic; + switch (di->ops[0].size) { + case 8: + chrcat_WS(str, 'B'); + break; + case 16: + chrcat_WS(str, 'W'); + break; + case 32: + chrcat_WS(str, 'D'); + break; + case 64: + chrcat_WS(str, 'Q'); + break; + } + return; + } + } + + for (i = 0; ((i < OPERANDS_NO) && (di->ops[i].type != O_NONE)); i++) { + if (i > 0) strcat_WSN(str, ", "); + switch (di->ops[i].type) { + case O_REG: + strcat_WS(str, (const _WString *) &_REGISTERS[di->ops[i].index]); + break; + case O_IMM: + /* If the instruction is 'push', show explicit size (except byte imm). */ + if ((di->opcode == I_PUSH) && (di->ops[i].size != 8)) + distorm_format_size(str, di, i); + /* Special fix for negative sign extended immediates. 
*/ + if ((di->flags & FLAG_IMM_SIGNED) && (di->ops[i].size == 8)) { + if (di->imm.sbyte < 0) { + chrcat_WS(str, MINUS_DISP_CHR); + str_code_hb(str, -di->imm.sbyte); + break; + } + } + if (di->ops[i].size == 64) str_code_hqw(str, (uint8_t *) &di->imm.qword); + else str_code_hdw(str, di->imm.dword); + break; + case O_IMM1: + str_code_hdw(str, di->imm.ex.i1); + break; + case O_IMM2: + str_code_hdw(str, di->imm.ex.i2); + break; + case O_DISP: + distorm_format_size(str, di, i); + chrcat_WS(str, OPEN_CHR); + if ((SEGMENT_GET(di->segment) != R_NONE) && !SEGMENT_IS_DEFAULT(di->segment)) { + strcat_WS(str, (const _WString *) &_REGISTERS[SEGMENT_GET(di->segment)]); + chrcat_WS(str, SEG_OFF_CHR); + } + tmpDisp64 = di->disp & addrMask; + str_code_hqw(str, (uint8_t *) &tmpDisp64); + chrcat_WS(str, CLOSE_CHR); + break; + case O_SMEM: + distorm_format_size(str, di, i); + chrcat_WS(str, OPEN_CHR); + + /* + * This is where we need to take special care for String instructions. + * If we got here, it means we need to explicitly show their operands. + * The problem with CMPS and MOVS is that they have two(!) memory operands. + * So we have to complete it ourselves, since the structure supplies only the segment that can be overridden. + * And make the rest of the String operations explicit. 
+ */ + segment = SEGMENT_GET(di->segment); + isDefault = SEGMENT_IS_DEFAULT(di->segment); + switch (di->opcode) { + case I_MOVS: + isDefault = FALSE; + if (i == 0) segment = R_ES; + break; + case I_CMPS: + isDefault = FALSE; + if (i == 1) segment = R_ES; + break; + case I_INS: + case I_LODS: + case I_STOS: + case I_SCAS: + isDefault = FALSE; + break; + } + if (!isDefault && (segment != R_NONE)) { + strcat_WS(str, (const _WString *) &_REGISTERS[segment]); + chrcat_WS(str, SEG_OFF_CHR); + } + + strcat_WS(str, (const _WString *) &_REGISTERS[di->ops[i].index]); + + distorm_format_signed_disp(str, di, addrMask); + chrcat_WS(str, CLOSE_CHR); + break; + case O_MEM: + distorm_format_size(str, di, i); + chrcat_WS(str, OPEN_CHR); + if ((SEGMENT_GET(di->segment) != R_NONE) && !SEGMENT_IS_DEFAULT(di->segment)) { + strcat_WS(str, (const _WString *) &_REGISTERS[SEGMENT_GET(di->segment)]); + chrcat_WS(str, SEG_OFF_CHR); + } + if (di->base != R_NONE) { + strcat_WS(str, (const _WString *) &_REGISTERS[di->base]); + chrcat_WS(str, PLUS_DISP_CHR); + } + strcat_WS(str, (const _WString *) &_REGISTERS[di->ops[i].index]); + if (di->scale != 0) { + chrcat_WS(str, '*'); + if (di->scale == 2) chrcat_WS(str, '2'); + else if (di->scale == 4) chrcat_WS(str, '4'); + else /* if (di->scale == 8) */ chrcat_WS(str, '8'); + } + + distorm_format_signed_disp(str, di, addrMask); + chrcat_WS(str, CLOSE_CHR); + break; + case O_PC: +#ifdef SUPPORT_64BIT_OFFSET + str_off64(str, (di->imm.sqword + di->addr + di->size) & addrMask); +#else + str_code_hdw(str, ((_OffsetType)di->imm.sdword + di->addr + di->size) & (uint32_t)addrMask); +#endif + break; + case O_PTR: + str_code_hdw(str, di->imm.ptr.seg); + chrcat_WS(str, SEG_OFF_CHR); + str_code_hdw(str, di->imm.ptr.off); + break; + } + } + + if (di->flags & FLAG_HINT_TAKEN) strcat_WSN(str, " ;TAKEN"); + else if (di->flags & FLAG_HINT_NOT_TAKEN) strcat_WSN(str, " ;NOT TAKEN"); +} + + +#ifdef SUPPORT_64BIT_OFFSET +_DLLEXPORT_ _DecodeResult 
+distorm_decode64(_OffsetType codeOffset, const unsigned char *code, int codeLen, _DecodeType dt, + _DecodedInst result[], unsigned int maxInstructions, + unsigned int *usedInstructionsCount) +#else +_DLLEXPORT_ _DecodeResult distorm_decode32(_OffsetType codeOffset, const unsigned char* code, int codeLen, _DecodeType dt, _DecodedInst result[], unsigned int maxInstructions, unsigned int* usedInstructionsCount) +#endif +{ + _DecodeResult res; + _DInst di; + _CodeInfo ci; + unsigned int instsCount = 0, i; + + *usedInstructionsCount = 0; + + /* I use codeLen as a signed variable in order to ease detection of underflow... and besides - */ + if (codeLen < 0) { + return DECRES_INPUTERR; + } + + if ((dt != Decode16Bits) && (dt != Decode32Bits) && (dt != Decode64Bits)) { + return DECRES_INPUTERR; + } + + if (code == NULL || result == NULL) { + return DECRES_INPUTERR; + } + + /* Assume length=0 is success. */ + if (codeLen == 0) { + return DECRES_SUCCESS; + } + + /* + * We have to format the result into text. But the interal decoder works with the new structure of _DInst. + * Therefore, we will pass the result array(!) from the caller and the interal decoder will fill it in with _DInst's. + * Then we will copy each result to a temporary structure, and use it to reformat that specific result. + * + * This is all done to save memory allocation and to work on the same result array in-place!!! + * It's a bit ugly, I have to admit, but worth it. 
+ */ + + ci.codeOffset = codeOffset; + ci.code = code; + ci.codeLen = codeLen; + ci.dt = dt; + ci.features = DF_NONE; + if (dt == Decode16Bits) ci.features = DF_MAXIMUM_ADDR16; + else if (dt == Decode32Bits) ci.features = DF_MAXIMUM_ADDR32; + + _DInst *r = (_DInst *) result; + res = decode_internal(&ci, TRUE, r, maxInstructions, &instsCount); + for (i = 0; i < instsCount; i++) { + if ((*usedInstructionsCount + i) >= maxInstructions) return DECRES_MEMORYERR; + + /* Copy the current decomposed result to a temp structure, so we can override the result with text. */ + memcpy(&di, (char *) result + (i * sizeof(_DecodedInst)), sizeof(_DInst)); +#ifdef SUPPORT_64BIT_OFFSET + distorm_format64(&ci, &di, &result[i]); +#else + distorm_format32(&ci, &di, &result[i]); +#endif + } + + *usedInstructionsCount = instsCount; + return res; +} + +#endif /* DISTORM_LIGHT */ + +_DInst Decode(uint8_t *code, size_t code_len, unsigned is_64bit) { + _DecodeResult res; + _CodeInfo ci; + _DInst result[1]; + + unsigned int instsCount = 0; + _DecodeType dt = is_64bit ? Decode64Bits : Decode32Bits; + + ci.codeOffset = 0; + ci.code = code; + ci.codeLen = (int) code_len; + ci.dt = dt; + ci.features = DF_NONE; + if (dt == Decode16Bits) ci.features = DF_MAXIMUM_ADDR16; + else if (dt == Decode32Bits) ci.features = DF_MAXIMUM_ADDR32; + + distorm_decompose(&ci, result, 1, &instsCount); + return result[0]; +} + +_DLLEXPORT_ unsigned int distorm_version(void) { + return __DISTORMV__; +} diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.h new file mode 100644 index 00000000..065b067f --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/distorm.h @@ -0,0 +1,484 @@ +/* diStorm 3.4.0 */ + +/* +distorm.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. 
+*/
+#include <stddef.h>
+
+#ifndef DISTORM_H
+#define DISTORM_H
+
+/*
+ * 64 bit offsets support:
+ * If the diStorm library you use was compiled with 64 bits offsets,
+ * make sure you compile your own code with the following macro set:
+ * SUPPORT_64BIT_OFFSET
+ * Otherwise comment it out, or you will get a linker error of an unresolved symbol...
+ * Turned on by default!
+ */
+
+#if !(defined(DISTORM_STATIC) || defined(DISTORM_DYNAMIC))
+	/* Define this macro for outer projects by default. */
+	#define SUPPORT_64BIT_OFFSET
+#endif
+
+/* TINYC has a problem with some 64bits library functions, so ignore 64 bit offsets. */
+#ifdef __TINYC__
+	#undef SUPPORT_64BIT_OFFSET
+#endif
+
+/* If your compiler doesn't support stdint.h, define your own 64 bits type. */
+#ifdef SUPPORT_64BIT_OFFSET
+	#ifdef _MSC_VER
+		#define OFFSET_INTEGER unsigned __int64
+	#else
+		#include <stdint.h>
+		#define OFFSET_INTEGER uint64_t
+	#endif
+#else
+	/* 32 bit offsets are used. */
+	#define OFFSET_INTEGER unsigned long
+#endif
+
+#ifdef _MSC_VER
+/* Since MSVC isn't shipped with stdint.h, we will have our own: */
+typedef signed __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+typedef signed __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef signed __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef signed __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+#endif
+
+/* Support C++ compilers */
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+
+/* *** Helper Macros *** */
+
+/* Get the ISC of the instruction, used with the definitions below. */
+#define META_GET_ISC(meta) (((meta) >> 3) & 0x1f)
+#define META_SET_ISC(di, isc) (((di)->meta) |= ((isc) << 3))
+/* Get the flow control flags of the instruction, see 'features for decompose' below. */
+#define META_GET_FC(meta) ((meta) & 0x7)
+
+/* Get the target address of a branching instruction. O_PC operand type.
*/ +#define INSTRUCTION_GET_TARGET(di) ((_OffsetType)(((di)->addr + (di)->imm.addr + (di)->size))) +/* Get the target address of a RIP-relative memory indirection. */ +#define INSTRUCTION_GET_RIP_TARGET(di) ((_OffsetType)(((di)->addr + (di)->disp + (di)->size))) + +/* + * Operand Size or Adderss size are stored inside the flags: + * 00 - 16 bits + * 01 - 32 bits + * 10 - 64 bits + * 11 - reserved + * + * If you call these set-macros more than once, you will have to clean the bits before doing so. + */ +#define FLAG_SET_OPSIZE(di, size) ((di->flags) |= (((size) & 3) << 8)) +#define FLAG_SET_ADDRSIZE(di, size) ((di->flags) |= (((size) & 3) << 10)) +#define FLAG_GET_OPSIZE(flags) (((flags) >> 8) & 3) +#define FLAG_GET_ADDRSIZE(flags) (((flags) >> 10) & 3) +/* To get the LOCK/REPNZ/REP prefixes. */ +#define FLAG_GET_PREFIX(flags) ((flags) & 7) +/* Indicates whether the instruction is privileged. */ +#define FLAG_GET_PRIVILEGED(flags) (((flags) & FLAG_PRIVILEGED_INSTRUCTION) != 0) + +/* + * Macros to extract segment registers from 'segment': + */ +#define SEGMENT_DEFAULT 0x80 +#define SEGMENT_SET(di, seg) ((di->segment) |= seg) +#define SEGMENT_GET(segment) (((segment) == R_NONE) ? R_NONE : ((segment) & 0x7f)) +#define SEGMENT_IS_DEFAULT(segment) (((segment) & SEGMENT_DEFAULT) == SEGMENT_DEFAULT) + + +/* Decodes modes of the disassembler, 16 bits or 32 bits or 64 bits for AMD64, x86-64. */ +typedef enum { Decode16Bits = 0, Decode32Bits = 1, Decode64Bits = 2 } _DecodeType; + +typedef OFFSET_INTEGER _OffsetType; + +typedef struct { + _OffsetType codeOffset, nextOffset; /* nextOffset is OUT only. */ + const uint8_t* code; + int codeLen; /* Using signed integer makes it easier to detect an underflow. 
*/ + _DecodeType dt; + unsigned int features; +} _CodeInfo; + +typedef enum { O_NONE, O_REG, O_IMM, O_IMM1, O_IMM2, O_DISP, O_SMEM, O_MEM, O_PC, O_PTR } _OperandType; + +typedef union { + /* Used by O_IMM: */ + int8_t sbyte; + uint8_t byte; + int16_t sword; + uint16_t word; + int32_t sdword; + uint32_t dword; + int64_t sqword; /* All immediates are SIGN-EXTENDED to 64 bits! */ + uint64_t qword; + + /* Used by O_PC: (Use GET_TARGET_ADDR).*/ + _OffsetType addr; /* It's a relative offset as for now. */ + + /* Used by O_PTR: */ + struct { + uint16_t seg; + /* Can be 16 or 32 bits, size is in ops[n].size. */ + uint32_t off; + } ptr; + + /* Used by O_IMM1 (i1) and O_IMM2 (i2). ENTER instruction only. */ + struct { + uint32_t i1; + uint32_t i2; + } ex; +} _Value; + +typedef struct { + /* Type of operand: + O_NONE: operand is to be ignored. + O_REG: index holds global register index. + O_IMM: instruction.imm. + O_IMM1: instruction.imm.ex.i1. + O_IMM2: instruction.imm.ex.i2. + O_DISP: memory dereference with displacement only, instruction.disp. + O_SMEM: simple memory dereference with optional displacement (a single register memory dereference). + O_MEM: complex memory dereference (optional fields: s/i/b/disp). + O_PC: the relative address of a branch instruction (instruction.imm.addr). + O_PTR: the absolute target address of a far branch instruction (instruction.imm.ptr.seg/off). + */ + uint8_t type; /* _OperandType */ + + /* Index of: + O_REG: holds global register index + O_SMEM: holds the 'base' register. E.G: [ECX], [EBX+0x1234] are both in operand.index. + O_MEM: holds the 'index' register. E.G: [EAX*4] is in operand.index. + */ + uint8_t index; + + /* Size in bits of: + O_REG: register + O_IMM: instruction.imm + O_IMM1: instruction.imm.ex.i1 + O_IMM2: instruction.imm.ex.i2 + O_DISP: instruction.disp + O_SMEM: size of indirection. + O_MEM: size of indirection. 
+ O_PC: size of the relative offset + O_PTR: size of instruction.imm.ptr.off (16 or 32) + */ + uint16_t size; +} _Operand; + +#define OPCODE_ID_NONE 0 +/* Instruction could not be disassembled. */ +#define FLAG_NOT_DECODABLE ((uint16_t)-1) +/* The instruction locks memory access. */ +#define FLAG_LOCK (1 << 0) +/* The instruction is prefixed with a REPNZ. */ +#define FLAG_REPNZ (1 << 1) +/* The instruction is prefixed with a REP, this can be a REPZ, it depends on the specific instruction. */ +#define FLAG_REP (1 << 2) +/* Indicates there is a hint taken for Jcc instructions only. */ +#define FLAG_HINT_TAKEN (1 << 3) +/* Indicates there is a hint non-taken for Jcc instructions only. */ +#define FLAG_HINT_NOT_TAKEN (1 << 4) +/* The Imm value is signed extended (E.G in 64 bit decoding mode, a 32 bit imm is usually sign extended into 64 bit imm). */ +#define FLAG_IMM_SIGNED (1 << 5) +/* The destination operand is writable. */ +#define FLAG_DST_WR (1 << 6) +/* The instruction uses RIP-relative indirection. */ +#define FLAG_RIP_RELATIVE (1 << 7) + +/* See flag FLAG_GET_XXX macros above. */ + +/* The instruction is privileged and can only be used from Ring0. */ +#define FLAG_PRIVILEGED_INSTRUCTION (1 << 15) + +/* No register was defined. */ +#define R_NONE ((uint8_t)-1) + +#define REGS64_BASE 0 +#define REGS32_BASE 16 +#define REGS16_BASE 32 +#define REGS8_BASE 48 +#define REGS8_REX_BASE 64 +#define SREGS_BASE 68 +#define FPUREGS_BASE 75 +#define MMXREGS_BASE 83 +#define SSEREGS_BASE 91 +#define AVXREGS_BASE 107 +#define CREGS_BASE 123 +#define DREGS_BASE 132 + +#define OPERANDS_NO (4) + +typedef struct { + /* Used by ops[n].type == O_IMM/O_IMM1&O_IMM2/O_PTR/O_PC. Its size is ops[n].size. */ + _Value imm; + /* Used by ops[n].type == O_SMEM/O_MEM/O_DISP. Its size is dispSize. */ + uint64_t disp; + /* Virtual address of first byte of instruction. */ + _OffsetType addr; + /* General flags of instruction, holds prefixes and more, if FLAG_NOT_DECODABLE, instruction is invalid. 
*/ + uint16_t flags; + /* Unused prefixes mask, for each bit that is set that prefix is not used (LSB is byte [addr + 0]). */ + uint16_t unusedPrefixesMask; + /* Mask of registers that were used in the operands, only used for quick look up, in order to know *some* operand uses that register class. */ + uint32_t usedRegistersMask; + /* ID of opcode in the global opcode table. Use for mnemonic look up. */ + uint16_t opcode; + /* Up to four operands per instruction, ignored if ops[n].type == O_NONE. */ + _Operand ops[OPERANDS_NO]; + /* Size of the whole instruction in bytes. */ + uint8_t size; + /* Segment information of memory indirection, default segment, or overriden one, can be -1. Use SEGMENT macros. */ + uint8_t segment; + /* Used by ops[n].type == O_MEM. Base global register index (might be R_NONE), scale size (2/4/8), ignored for 0 or 1. */ + uint8_t base, scale; + uint8_t dispSize; + /* Meta defines the instruction set class, and the flow control flags. Use META macros. */ + uint8_t meta; + /* The CPU flags that the instruction operates upon. */ + uint16_t modifiedFlagsMask, testedFlagsMask, undefinedFlagsMask; +} _DInst; + +#ifndef DISTORM_LIGHT + +/* Static size of strings. Do not change this value. Keep Python wrapper in sync. */ +#define MAX_TEXT_SIZE (48) +typedef struct { + unsigned int length; + unsigned char p[MAX_TEXT_SIZE]; /* p is a null terminated string. */ +} _WString; + +/* + * Old decoded instruction structure in text format. + * Used only for backward compatibility with diStorm64. + * This structure holds all information the disassembler generates per instruction. + */ +typedef struct { + _WString mnemonic; /* Mnemonic of decoded instruction, prefixed if required by REP, LOCK etc. */ + _WString operands; /* Operands of the decoded instruction, up to 3 operands, comma-seperated. */ + _WString instructionHex; /* Hex dump - little endian, including prefixes. */ + unsigned int size; /* Size of decoded instruction in bytes. 
*/ + _OffsetType offset; /* Start offset of the decoded instruction. */ +} _DecodedInst; + +#endif /* DISTORM_LIGHT */ + +/* Register masks for quick look up, each mask indicates one of a register-class that is being used in some operand. */ +#define RM_AX 1 /* AL, AH, AX, EAX, RAX */ +#define RM_CX 2 /* CL, CH, CX, ECX, RCX */ +#define RM_DX 4 /* DL, DH, DX, EDX, RDX */ +#define RM_BX 8 /* BL, BH, BX, EBX, RBX */ +#define RM_SP 0x10 /* SPL, SP, ESP, RSP */ +#define RM_BP 0x20 /* BPL, BP, EBP, RBP */ +#define RM_SI 0x40 /* SIL, SI, ESI, RSI */ +#define RM_DI 0x80 /* DIL, DI, EDI, RDI */ +#define RM_FPU 0x100 /* ST(0) - ST(7) */ +#define RM_MMX 0x200 /* MM0 - MM7 */ +#define RM_SSE 0x400 /* XMM0 - XMM15 */ +#define RM_AVX 0x800 /* YMM0 - YMM15 */ +#define RM_CR 0x1000 /* CR0, CR2, CR3, CR4, CR8 */ +#define RM_DR 0x2000 /* DR0, DR1, DR2, DR3, DR6, DR7 */ +#define RM_R8 0x4000 /* R8B, R8W, R8D, R8 */ +#define RM_R9 0x8000 /* R9B, R9W, R9D, R9 */ +#define RM_R10 0x10000 /* R10B, R10W, R10D, R10 */ +#define RM_R11 0x20000 /* R11B, R11W, R11D, R11 */ +#define RM_R12 0x40000 /* R12B, R12W, R12D, R12 */ +#define RM_R13 0x80000 /* R13B, R13W, R13D, R13 */ +#define RM_R14 0x100000 /* R14B, R14W, R14D, R14 */ +#define RM_R15 0x200000 /* R15B, R15W, R15D, R15 */ + +/* RIP should be checked using the 'flags' field and FLAG_RIP_RELATIVE. + * Segments should be checked using the segment macros. + * For now R8 - R15 are not supported and non general purpose registers map into same RM. + */ + +/* CPU flags that instructions modify, test or undefine (are EFLAGS compatible!). */ +#define D_CF 1 /* Carry */ +#define D_PF 4 /* Parity */ +#define D_AF 0x10 /* Auxiliary */ +#define D_ZF 0x40 /* Zero */ +#define D_SF 0x80 /* Sign */ +#define D_IF 0x200 /* Interrupt */ +#define D_DF 0x400 /* Direction */ +#define D_OF 0x800 /* Overflow */ + +/* + * Instructions Set classes: + * if you want a better understanding of the available classes, look at disOps project, file: x86sets.py. 
+ */ +/* Indicates the instruction belongs to the General Integer set. */ +#define ISC_INTEGER 1 +/* Indicates the instruction belongs to the 387 FPU set. */ +#define ISC_FPU 2 +/* Indicates the instruction belongs to the P6 set. */ +#define ISC_P6 3 +/* Indicates the instruction belongs to the MMX set. */ +#define ISC_MMX 4 +/* Indicates the instruction belongs to the SSE set. */ +#define ISC_SSE 5 +/* Indicates the instruction belongs to the SSE2 set. */ +#define ISC_SSE2 6 +/* Indicates the instruction belongs to the SSE3 set. */ +#define ISC_SSE3 7 +/* Indicates the instruction belongs to the SSSE3 set. */ +#define ISC_SSSE3 8 +/* Indicates the instruction belongs to the SSE4.1 set. */ +#define ISC_SSE4_1 9 +/* Indicates the instruction belongs to the SSE4.2 set. */ +#define ISC_SSE4_2 10 +/* Indicates the instruction belongs to the AMD's SSE4.A set. */ +#define ISC_SSE4_A 11 +/* Indicates the instruction belongs to the 3DNow! set. */ +#define ISC_3DNOW 12 +/* Indicates the instruction belongs to the 3DNow! Extensions set. */ +#define ISC_3DNOWEXT 13 +/* Indicates the instruction belongs to the VMX (Intel) set. */ +#define ISC_VMX 14 +/* Indicates the instruction belongs to the SVM (AMD) set. */ +#define ISC_SVM 15 +/* Indicates the instruction belongs to the AVX (Intel) set. */ +#define ISC_AVX 16 +/* Indicates the instruction belongs to the FMA (Intel) set. */ +#define ISC_FMA 17 +/* Indicates the instruction belongs to the AES/AVX (Intel) set. */ +#define ISC_AES 18 +/* Indicates the instruction belongs to the CLMUL (Intel) set. */ +#define ISC_CLMUL 19 + +/* Features for decompose: */ +#define DF_NONE 0 +/* The decoder will limit addresses to a maximum of 16 bits. */ +#define DF_MAXIMUM_ADDR16 1 +/* The decoder will limit addresses to a maximum of 32 bits. */ +#define DF_MAXIMUM_ADDR32 2 +/* The decoder will return only flow control instructions (and filter the others internally). 
*/ +#define DF_RETURN_FC_ONLY 4 +/* The decoder will stop and return to the caller when the instruction 'CALL' (near and far) was decoded. */ +#define DF_STOP_ON_CALL 8 +/* The decoder will stop and return to the caller when the instruction 'RET' (near and far) was decoded. */ +#define DF_STOP_ON_RET 0x10 +/* The decoder will stop and return to the caller when the instruction system-call/ret was decoded. */ +#define DF_STOP_ON_SYS 0x20 +/* The decoder will stop and return to the caller when any of the branch 'JMP', (near and far) instructions were decoded. */ +#define DF_STOP_ON_UNC_BRANCH 0x40 +/* The decoder will stop and return to the caller when any of the conditional branch instruction were decoded. */ +#define DF_STOP_ON_CND_BRANCH 0x80 +/* The decoder will stop and return to the caller when the instruction 'INT' (INT, INT1, INTO, INT 3) was decoded. */ +#define DF_STOP_ON_INT 0x100 +/* The decoder will stop and return to the caller when any of the 'CMOVxx' instruction was decoded. */ +#define DF_STOP_ON_CMOV 0x200 +/* The decoder will stop and return to the caller when any flow control instruction was decoded. */ +#define DF_STOP_ON_FLOW_CONTROL (DF_STOP_ON_CALL | DF_STOP_ON_RET | DF_STOP_ON_SYS | DF_STOP_ON_UNC_BRANCH | DF_STOP_ON_CND_BRANCH | DF_STOP_ON_INT | DF_STOP_ON_CMOV) + +/* Indicates the instruction is not a flow-control instruction. */ +#define FC_NONE 0 +/* Indicates the instruction is one of: CALL, CALL FAR. */ +#define FC_CALL 1 +/* Indicates the instruction is one of: RET, IRET, RETF. */ +#define FC_RET 2 +/* Indicates the instruction is one of: SYSCALL, SYSRET, SYSENTER, SYSEXIT. */ +#define FC_SYS 3 +/* Indicates the instruction is one of: JMP, JMP FAR. */ +#define FC_UNC_BRANCH 4 +/* + * Indicates the instruction is one of: + * JCXZ, JO, JNO, JB, JAE, JZ, JNZ, JBE, JA, JS, JNS, JP, JNP, JL, JGE, JLE, JG, LOOP, LOOPZ, LOOPNZ. + */ +#define FC_CND_BRANCH 5 +/* Indiciates the instruction is one of: INT, INT1, INT 3, INTO, UD2. 
*/ +#define FC_INT 6 +/* Indicates the instruction is one of: CMOVxx. */ +#define FC_CMOV 7 + +/* Return code of the decoding function. */ +typedef enum { DECRES_NONE, DECRES_SUCCESS, DECRES_MEMORYERR, DECRES_INPUTERR, DECRES_FILTERED } _DecodeResult; + +/* Define the following interface functions only for outer projects. */ +#if !(defined(DISTORM_STATIC) || defined(DISTORM_DYNAMIC)) + +/* distorm_decode + * Input: + * offset - Origin of the given code (virtual address that is), NOT an offset in code. + * code - Pointer to the code buffer to be disassembled. + * length - Amount of bytes that should be decoded from the code buffer. + * dt - Decoding mode, 16 bits (Decode16Bits), 32 bits (Decode32Bits) or AMD64 (Decode64Bits). + * result - Array of type _DecodeInst which will be used by this function in order to return the disassembled instructions. + * maxInstructions - The maximum number of entries in the result array that you pass to this function, so it won't exceed its bound. + * usedInstructionsCount - Number of the instruction that successfully were disassembled and written to the result array. + * Output: usedInstructionsCount will hold the number of entries used in the result array + * and the result array itself will be filled with the disassembled instructions. + * Return: DECRES_SUCCESS on success (no more to disassemble), DECRES_INPUTERR on input error (null code buffer, invalid decoding mode, etc...), + * DECRES_MEMORYERR when there are not enough entries to use in the result array, BUT YOU STILL have to check for usedInstructionsCount! + * Side-Effects: Even if the return code is DECRES_MEMORYERR, there might STILL be data in the + * array you passed, this function will try to use as much entries as possible! + * Notes: 1)The minimal size of maxInstructions is 15. + * 2)You will have to synchronize the offset,code and length by yourself if you pass code fragments and not a complete code block! 
+ */ + +/* distorm_decompose + * See more documentation online at the GitHub project's wiki. + * + */ +#ifdef SUPPORT_64BIT_OFFSET + + _DecodeResult distorm_decompose64(_CodeInfo* ci, _DInst result[], unsigned int maxInstructions, unsigned int* usedInstructionsCount); + #define distorm_decompose distorm_decompose64 + +#ifndef DISTORM_LIGHT + /* If distorm-light is defined, we won't export these text-formatting functionality. */ + _DecodeResult distorm_decode64(_OffsetType codeOffset, const unsigned char* code, int codeLen, _DecodeType dt, _DecodedInst result[], unsigned int maxInstructions, unsigned int* usedInstructionsCount); + void distorm_format64(const _CodeInfo* ci, const _DInst* di, _DecodedInst* result); + #define distorm_decode distorm_decode64 + #define distorm_format distorm_format64 +#endif /*DISTORM_LIGHT*/ + +#else /*SUPPORT_64BIT_OFFSET*/ + + _DecodeResult distorm_decompose32(_CodeInfo* ci, _DInst result[], unsigned int maxInstructions, unsigned int* usedInstructionsCount); + #define distorm_decompose distorm_decompose32 + +#ifndef DISTORM_LIGHT + /* If distorm-light is defined, we won't export these text-formatting functionality. */ + _DecodeResult distorm_decode32(_OffsetType codeOffset, const unsigned char* code, int codeLen, _DecodeType dt, _DecodedInst result[], unsigned int maxInstructions, unsigned int* usedInstructionsCount); + void distorm_format32(const _CodeInfo* ci, const _DInst* di, _DecodedInst* result); + #define distorm_decode distorm_decode32 + #define distorm_format distorm_format32 +#endif /*DISTORM_LIGHT*/ + +#endif + +_DInst Decode(uint8_t *code, size_t codeLen, unsigned is_64bit); + +/* + * distorm_version + * Input: + * none + * + * Output: unsigned int - version of compiled library. 
+ */ +unsigned int distorm_version(void); + +#endif /* DISTORM_STATIC */ + +#ifdef __cplusplus +} /* End Of Extern */ +#endif + +#endif /* DISTORM_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.c new file mode 100644 index 00000000..0022980b --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.c @@ -0,0 +1,597 @@ +/* +instructions.c + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#include "instructions.h" + +#include "insts.h" +#include "prefix.h" +#include "x86defs.h" +#include "mnemonics.h" + + +/* Helper macros to extract the type or index from an inst-node value. */ +#define INST_NODE_INDEX(n) ((n) & 0x1fff) +#define INST_NODE_TYPE(n) ((n) >> 13) + +/* Helper macro to read the actual flags that are associated with an inst-info. */ +#define INST_INFO_FLAGS(ii) (FlagsTable[InstSharedInfoTable[(ii)->sharedIndex].flagsIndex]) + +/* +I use the trie data structure as I found it most fitting to a disassembler mechanism. +When you read a byte and have to decide if it's enough or you should read more bytes, 'till you get to the instruction information. +It's really fast because you POP the instruction info in top 3 iterates on the DB, because an instruction can be formed from two bytes + 3 bits reg from the ModR/M byte. +For a simple explanation, check this out: +http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Tree/Trie/ +Further reading: http://en.wikipedia.org/wiki/Trie + +The first GATE (array you read off a trie data structure), as I call them, is statically allocated by the compiler. +The second and third gates if used are being allocated dynamically by the instructions-insertion functionality. 
+ +How would such a thing look in memory, say we support 4 instructions with 3 bytes top (means 2 dynamically allocated gates). + +-> +|-------| 0, +|0| -------------------------------> |-------| +|1|RET | 1, |0|AND | +|2| -----> |-------| |1|XOR | +|3|INT3 | |0|PUSH | |2|OR | 0,3, +|-------| |1|POP | |3| --------->|-------| + |2|PUSHF| |-------| |0|ROR | + |3|POPF | |1|ROL | + |-------| |2|SHR | + |3|SHL | + |-------| + +Of course, this is NOT how Intel instructions set looks!!! +but I just wanted to give a small demonstration. +Now the instructions you get from such a trie DB goes like this: + +0, 0 - AND +0, 1 - XOR +0, 2 - OR +0, 3, 0, ROR +0, 3, 1, ROL +0, 3, 2, SHR +0, 3, 3, SHL +1 - RET +2, 0 - PUSH +2, 1 - POP +2, 2 - PUSHF +2, 3 - POPF +3 - INT3 + +I guess it's clear by now. +So now, if you read 0, you know that you have to enter the second gate(list) with the second byte specifying the index. +But if you read 1, you know that you go to an instruction (in this case, a RET). +That's why there's an Instruction-Node structure, it tells you whether you got to an instruction or another list +so you should keep on reading byte). + +In Intel, you could go through 4 gates at top, because there are instructions which are built from 2 bytes and another smaller list +for the REG part, or newest SSE4 instructions which use 4 bytes for opcode. +Therefore, Intel's first gate is 256 long, and other gates are 256 (/72) or 8 long, yes, it costs pretty much a lot of memory +for non-used defined instructions, but I think that it still rocks. +*/ + +/* + * A helper function to look up the correct inst-info structure. + * It does one fetch from the index-table, and then another to get the inst-info. + * Note that it takes care about basic inst-info or inst-info-ex. + * The caller should worry about boundary checks and whether it accesses a last-level table. 
+ */
+static _InstInfo* inst_get_info(_InstNode in, int index)
+{
+	int instIndex = 0;
+
+	/* First fetch: step from the given gate to the child node selected by 'index'. */
+	in = InstructionsTree[INST_NODE_INDEX(in) + index];
+	if (in == INT_NOTEXISTS) return NULL;
+
+	instIndex = INST_NODE_INDEX(in);
+	/* INT_INFO nodes live in the basic table; anything else here is an extended inst-info (InstInfosEx). */
+	return INST_NODE_TYPE(in) == INT_INFO ? &InstInfos[instIndex] : (_InstInfo*)&InstInfosEx[instIndex];
+}
+
+/*
+ * This function is responsible to return the instruction information of the first found in code.
+ * It returns the _InstInfo of the found instruction, otherwise NULL.
+ * code should point to the ModR/M byte upon exit (if used), or after the instruction binary code itself.
+ * This function is NOT decoding-type dependant, it is up to the caller to see whether the instruction is valid.
+ * Get the instruction info, using a Trie data structure.
+ *
+ * Sometimes normal prefixes become mandatory prefixes, which means they are now part of the instruction opcode bytes.
+
+ * This is a bit tricky now,
+ * if the first byte is a REP (F3) prefix, we will have to give a chance to an SSE instruction.
+ * If an instruction doesn't exist, we will make it as a prefix and re-locateinst.
+ * A case such that a REP prefix is being changed into an instruction byte and also an SSE instruction will not be found can't happen,
+ * simply because there are no collisions between string instruction and SSE instructions (they are escaped).
+
+ * As for S/SSE2/3, check for F2 and 66 as well.
+
+ * In 64 bits, we have to make sure that we will skip the REX prefix, if it exists.
+ * There's a specific case, where a 66 is mandatory but it was dropped because REG.W was used,
+ * but it doesn't behave as an operand size prefix but as a mandatory, so we will have to take it into account.
+
+ * For example (64 bits decoding mode):
+ * 66 98 CBW
+ * 48 98 CDQE
+ * 66 48 98: db 0x66; CDQE
+ * Shows that operand size is dropped.
+
+ * Now, it's a mandatory prefix and NOT an operand size one.
+ * 66480f2dc0 db 0x48; CVTPD2PI XMM0, XMM0 + * Although this instruction doesn't require a REX.W, it just shows, that even if it did - it doesn't matter. + * REX.W is dropped because it's not required, but the decode function disabled the operand size even so. + */ +static _InstInfo* inst_lookup_prefixed(_InstNode in, _PrefixState* ps) +{ + int checkOpSize = FALSE; + int index = 0; + _InstInfo* ii = NULL; + + /* Check prefixes of current decoded instruction (None, 0x66, 0xf3, 0xf2). */ + switch (ps->decodedPrefixes & (INST_PRE_OP_SIZE | INST_PRE_REPS)) + { + case 0: + /* Non-prefixed, index = 0. */ + index = 0; + break; + case INST_PRE_OP_SIZE: + /* 0x66, index = 1. */ + index = 1; + /* Mark that we used it as a mandatory prefix. */ + ps->isOpSizeMandatory = TRUE; + ps->decodedPrefixes &= ~INST_PRE_OP_SIZE; + break; + case INST_PRE_REP: + /* 0xf3, index = 2. */ + index = 2; + ps->decodedPrefixes &= ~INST_PRE_REP; + break; + case INST_PRE_REPNZ: + /* 0xf2, index = 3. */ + index = 3; + ps->decodedPrefixes &= ~INST_PRE_REPNZ; + break; + default: + /* + * Now we got a problem, since there are a few mandatory prefixes at once. + * There is only one case when it's ok, when the operand size prefix is for real (not mandatory). + * Otherwise we will have to return NULL, since the instruction is illegal. + * Therefore we will start with REPNZ and REP prefixes, + * try to get the instruction and only then check for the operand size prefix. + */ + + /* If both REPNZ and REP are together, it's illegal for sure. */ + if ((ps->decodedPrefixes & INST_PRE_REPS) == INST_PRE_REPS) return NULL; + + /* Now we know it's either REPNZ+OPSIZE or REP+OPSIZE, so examine the instruction. */ + if (ps->decodedPrefixes & INST_PRE_REPNZ) { + index = 3; + ps->decodedPrefixes &= ~INST_PRE_REPNZ; + } else if (ps->decodedPrefixes & INST_PRE_REP) { + index = 2; + ps->decodedPrefixes &= ~INST_PRE_REP; + } + /* Mark to verify the operand-size prefix of the fetched instruction below. 
 */
+		checkOpSize = TRUE;
+		break;
+	}
+
+	/* Fetch the inst-info from the index. */
+	ii = inst_get_info(in, index);
+
+	if (checkOpSize) {
+		/* If the instruction doesn't support operand size prefix, then it's illegal. */
+		if ((ii == NULL) || (~INST_INFO_FLAGS(ii) & INST_PRE_OP_SIZE)) return NULL;
+	}
+
+	/* If there was a prefix, but the instruction wasn't found. Try to fall back to use the normal instruction. */
+	if (ii == NULL) ii = inst_get_info(in, 0);
+	return ii;
+}
+
+/* A helper function to look up special VEX instructions.
+ * See if it's a MOD based instruction and fix index if required.
+ * Only after a first lookup (that was done by caller), we can tell if we need to fix the index.
+ * Because these are coupled instructions
+ * (which means that the base instruction hints about the other instruction).
+ * Note that caller should check if it's a MOD dependent instruction before getting in here.
+ */
+static _InstInfo* inst_vex_mod_lookup(_CodeInfo* ci, _InstNode in, _InstInfo* ii, unsigned int index)
+{
+	/* Advance to read the MOD from ModRM byte. */
+	ci->code += 1;
+	ci->codeLen -= 1;
+	if (ci->codeLen < 0) return NULL;
+	if (*ci->code < INST_DIVIDED_MODRM) {
+		/* MOD is not 11, therefore change the index to 8 - 12 range in the prefixed table. */
+		index += 4;
+		/* Make a second lookup for this special instruction. */
+		return inst_get_info(in, index);
+	}
+	/* Return the original one, in case we didn't find a suited instruction. */
+	return ii;
+}
+
+/*
+ * Looks up a VEX-encoded instruction.
+ * Derives the implied escape table (0F/0F38/0F3A) and the implied mandatory
+ * prefix (pp) from the VEX prefix fields, then walks the prefixed tables:
+ * VEX entries start at index 4 (4 + pp), or are moved to the 8 - 12 range by
+ * inst_vex_mod_lookup for MOD-coupled entries. Returns NULL if illegal.
+ */
+static _InstInfo* inst_vex_lookup(_CodeInfo* ci, _PrefixState* ps)
+{
+	_InstNode in = 0;
+	unsigned int pp = 0, start = 0;
+	unsigned int index = 4; /* VEX instructions start at index 4 in the Prefixed table. */
+	uint8_t vex = *ps->vexPos, vex2 = 0, v = 0;
+	int instType = 0, instIndex = 0;
+
+	/* The VEX instruction will #ud if any of 66, f0, f2, f3, REX prefixes precede.
*/ + _iflags illegal = (INST_PRE_OP_SIZE | INST_PRE_LOCK | INST_PRE_REP | INST_PRE_REPNZ | INST_PRE_REX); + if ((ps->decodedPrefixes & illegal) != 0) return NULL; + + /* Read the some fields from the VEX prefix we need to extract the instruction. */ + if (ps->prefixExtType == PET_VEX2BYTES) { + ps->vexV = v = (~vex >> 3) & 0xf; + pp = vex & 3; + /* Implied leading 0x0f byte by default for 2 bytes VEX prefix. */ + start = 1; + } else { /* PET_VEX3BYTES */ + start = vex & 0x1f; + vex2 = *(ps->vexPos + 1); + ps->vexV = v = (~vex2 >> 3) & 0xf; + pp = vex2 & 3; + } + + /* start can be either 1 (0x0f), 2 (0x0f, 0x038) or 3 (0x0f, 0x3a), otherwise it's illegal. */ + switch (start) + { + case 1: in = Table_0F; break; + case 2: in = Table_0F_38; break; + case 3: in = Table_0F_3A; break; + default: return NULL; + } + + /* pp is actually the implied mandatory prefix, apply it to the index. */ + index += pp; /* (None, 0x66, 0xf3, 0xf2) */ + + /* Read a byte from the stream. */ + ci->codeLen -= 1; + if (ci->codeLen < 0) return NULL; + + in = InstructionsTree[INST_NODE_INDEX(in) + *ci->code]; + if (in == INT_NOTEXISTS) return NULL; + + instType = INST_NODE_TYPE(in); + instIndex = INST_NODE_INDEX(in); + + /* + * If we started with 0f38 or 0f3a so it's a prefixed table, + * therefore it's surely a VEXed instruction (because of a high index). + * However, starting with 0f, could also lead immediately to a prefixed table for some bytes. + * it might return NULL, if the index is invalid. + */ + if (instType == INT_LIST_PREFIXED) { + _InstInfo* ii = inst_get_info(in, index); + /* See if the instruction is dependent on MOD. */ + if ((ii != NULL) && (((_InstInfoEx*)ii)->flagsEx & INST_MODRR_BASED)) { + ii = inst_vex_mod_lookup(ci, in, ii, index); + } + return ii; + } + + /* + * If we reached here, obviously we started with 0f. VEXed instructions must be nodes of a prefixed table. + * But since we found an instruction (or divided one), just return NULL. 
+ * They cannot lead to a VEXed instruction. + */ + if ((instType == INT_INFO) || (instType == INT_INFOEX) || (instType == INT_LIST_DIVIDED)) return NULL; + + /* Now we are left with handling either GROUP or FULL tables, therefore we will read another byte from the stream. */ + ci->code += 1; + ci->codeLen -= 1; + if (ci->codeLen < 0) return NULL; + + if (instType == INT_LIST_GROUP) { + in = InstructionsTree[instIndex + ((*ci->code >> 3) & 7)]; + /* Continue below to check prefixed table. */ + } else if (instType == INT_LIST_FULL) { + in = InstructionsTree[instIndex + *ci->code]; + /* Continue below to check prefixed table. */ + } + + /* Now that we got to the last table in the trie, check for a prefixed table. */ + if (INST_NODE_TYPE(in) == INT_LIST_PREFIXED) { + _InstInfo* ii = inst_get_info(in, index); + /* See if the instruction is dependent on MOD. */ + if ((ii != NULL) && (((_InstInfoEx*)ii)->flagsEx & INST_MODRR_BASED)) { + ii = inst_vex_mod_lookup(ci, in, ii, index); + } + return ii; + } + + /* No VEXed instruction was found. */ + return NULL; +} + +_InstInfo* inst_lookup(_CodeInfo* ci, _PrefixState* ps) +{ + unsigned int tmpIndex0 = 0, tmpIndex1 = 0, tmpIndex2 = 0, rex = ps->vrex; + int instType = 0; + _InstNode in = 0; + _InstInfo* ii = NULL; + int isWaitIncluded = FALSE; + + /* See whether we have to handle a VEX prefixed instruction. */ + if (ps->decodedPrefixes & INST_PRE_VEX) { + ii = inst_vex_lookup(ci, ps); + if (ii != NULL) { + /* Make sure that VEX.L exists when forced. */ + if ((((_InstInfoEx*)ii)->flagsEx & INST_FORCE_VEXL) && (~ps->vrex & PREFIX_EX_L)) return NULL; + /* If the instruction doesn't use VEX.vvvv it must be zero. */ + if ((((_InstInfoEx*)ii)->flagsEx & INST_VEX_V_UNUSED) && ps->vexV) return NULL; + } + return ii; + } + + /* Read first byte. */ + ci->codeLen -= 1; + if (ci->codeLen < 0) return NULL; + tmpIndex0 = *ci->code; + + /* Check for special 0x9b, WAIT instruction, which can be part of some instructions(x87). 
*/ + if (tmpIndex0 == INST_WAIT_INDEX) { + /* Only OCST_1dBYTES get a chance to include this byte as part of the opcode. */ + isWaitIncluded = TRUE; + + /* Ignore all prefixes, since they are useless and operate on the WAIT instruction itself. */ + prefixes_ignore_all(ps); + + /* Move to next code byte as a new whole instruction. */ + ci->code += 1; + ci->codeLen -= 1; + if (ci->codeLen < 0) return NULL; /* Faster to return NULL, it will be detected as WAIT later anyway. */ + /* Since we got a WAIT prefix, we re-read the first byte. */ + tmpIndex0 = *ci->code; + } + + /* Walk first byte in InstructionsTree root. */ + in = InstructionsTree[tmpIndex0]; + if (in == INT_NOTEXISTS) return NULL; + instType = INST_NODE_TYPE(in); + + /* Single byte instruction (OCST_1BYTE). */ + if ((instType < INT_INFOS) && (!isWaitIncluded)) { + /* Some single byte instructions need extra treatment. */ + switch (tmpIndex0) + { + case INST_ARPL_INDEX: + /* + * ARPL/MOVSXD share the same opcode, and both have different operands and mnemonics, of course. + * Practically, I couldn't come up with a comfortable way to merge the operands' types of ARPL/MOVSXD. + * And since the DB can't be patched dynamically, because the DB has to be multi-threaded compliant, + * I have no choice but to check for ARPL/MOVSXD right here - "right about now, the funk soul brother, check it out now, the funk soul brother...", fatboy slim + */ + if (ci->dt == Decode64Bits) { + return &II_MOVSXD; + } /* else ARPL will be returned because its defined in the DB already. */ + break; + + case INST_NOP_INDEX: /* Nopnopnop */ + /* Check for Pause, since it's prefixed with 0xf3, which is not a real mandatory prefix. */ + if (ps->decodedPrefixes & INST_PRE_REP) { + /* Flag this prefix as used. */ + ps->usedPrefixes |= INST_PRE_REP; + return &II_PAUSE; + } + + /* + * Treat NOP/XCHG specially. + * If we're not in 64bits restore XCHG to NOP, since in the DB it's XCHG. 
+ * Else if we're in 64bits examine REX, if exists, and decide which instruction should go to output. + * 48 90 XCHG RAX, RAX is a true NOP (eat REX in this case because it's valid). + * 90 XCHG EAX, EAX is a true NOP (and not high dword of RAX = 0 although it should be a 32 bits operation). + * Note that if the REX.B is used, then the register is not RAX anymore but R8, which means it's not a NOP. + */ + if (rex & PREFIX_EX_W) ps->usedPrefixes |= INST_PRE_REX; + if ((ci->dt != Decode64Bits) || (~rex & PREFIX_EX_B)) return &II_NOP; + break; + + case INST_LEA_INDEX: + /* Ignore segment override prefixes for LEA instruction. */ + ps->decodedPrefixes &= ~INST_PRE_SEGOVRD_MASK; + /* Update unused mask for ignoring segment prefix. */ + prefixes_ignore(ps, PFXIDX_SEG); + break; + } + + /* Return the 1 byte instruction we found. */ + return instType == INT_INFO ? &InstInfos[INST_NODE_INDEX(in)] : (_InstInfo*)&InstInfosEx[INST_NODE_INDEX(in)]; + } + + /* Read second byte, still doesn't mean all of its bits are used (I.E: ModRM). */ + ci->code += 1; + ci->codeLen -= 1; + if (ci->codeLen < 0) return NULL; + tmpIndex1 = *ci->code; + + /* Try single byte instruction + reg bits (OCST_13BYTES). */ + if ((instType == INT_LIST_GROUP) && (!isWaitIncluded)) return inst_get_info(in, (tmpIndex1 >> 3) & 7); + + /* Try single byte instruction + reg byte OR one whole byte (OCST_1dBYTES). */ + if (instType == INT_LIST_DIVIDED) { + + /* Checking for inst by REG bits is higher priority if it's found not to be divided instruction. */ + { + _InstNode in2 = InstructionsTree[INST_NODE_INDEX(in) + ((tmpIndex1 >> 3) & 7)]; + /* + * Do NOT check for NULL here, since we do a bit of a guess work, + * hence we don't override 'in', cause we might still need it. 
+ */ + instType = INST_NODE_TYPE(in2); + + if (instType == INT_INFO) ii = &InstInfos[INST_NODE_INDEX(in2)]; + else if (instType == INT_INFOEX) ii = (_InstInfo*)&InstInfosEx[INST_NODE_INDEX(in2)]; + if ((ii != NULL) && (INST_INFO_FLAGS(ii) & INST_NOT_DIVIDED)) return ii; + /* ii is reset below. */ + } + + /* Continue normally because of wait prefix. */ + if (tmpIndex1 < INST_DIVIDED_MODRM) { + /* An instruction which requires a ModR/M byte. Thus it's 1.3 bytes long instruction. */ + tmpIndex1 = (tmpIndex1 >> 3) & 7; /* Isolate the 3 REG/OPCODE bits. */ + } else { /* Normal 2 bytes instruction. */ + /* + * Divided instructions can't be in the range of 0x8-0xc0. + * That's because 0-8 are used for 3 bits group. + * And 0xc0-0xff are used for not-divided instruction. + * So the in between range is omitted, thus saving some more place in the tables. + */ + tmpIndex1 -= INST_DIVIDED_MODRM - 8; + } + + in = InstructionsTree[INST_NODE_INDEX(in) + tmpIndex1]; + if (in == INT_NOTEXISTS) return NULL; + instType = INST_NODE_TYPE(in); + + if (instType < INT_INFOS) { + /* If the instruction doesn't support the wait (marked as opsize) as part of the opcode, it's illegal. */ + ii = instType == INT_INFO ? &InstInfos[INST_NODE_INDEX(in)] : (_InstInfo*)&InstInfosEx[INST_NODE_INDEX(in)]; + if ((~INST_INFO_FLAGS(ii) & INST_PRE_OP_SIZE) && (isWaitIncluded)) return NULL; + return ii; + } + /* + * If we got here the instruction can support the wait prefix, so see if it was part of the stream. + * Examine prefixed table, specially used for 0x9b, since it's optional. + * No Wait: index = 0. + * Wait Exists, index = 1. + */ + return inst_get_info(in, isWaitIncluded); + } + + /* Don't allow to continue if WAIT is part of the opcode, because there are no instructions that include it. */ + if (isWaitIncluded) return NULL; + + /* Try 2 bytes long instruction (doesn't include ModRM byte). 
*/ + if (instType == INT_LIST_FULL) { + in = InstructionsTree[INST_NODE_INDEX(in) + tmpIndex1]; + if (in == INT_NOTEXISTS) return NULL; + instType = INST_NODE_TYPE(in); + + /* This is where we check if we just read two escape bytes in a row, which means it is a 3DNow! instruction. */ + if ((tmpIndex0 == _3DNOW_ESCAPE_BYTE) && (tmpIndex1 == _3DNOW_ESCAPE_BYTE)) return &II_3DNOW; + + /* 2 bytes instruction (OCST_2BYTES). */ + if (instType < INT_INFOS) + return instType == INT_INFO ? &InstInfos[INST_NODE_INDEX(in)] : (_InstInfo*)&InstInfosEx[INST_NODE_INDEX(in)]; + + /* + * 2 bytes + mandatory prefix. + * Mandatory prefixes can be anywhere in the prefixes. + * There cannot be more than one mandatory prefix, unless it's a normal operand size prefix. + */ + if (instType == INT_LIST_PREFIXED) return inst_lookup_prefixed(in, ps); + } + + /* Read third byte, still doesn't mean all of its bits are used (I.E: ModRM). */ + ci->code += 1; + ci->codeLen -= 1; + if (ci->codeLen < 0) return NULL; + tmpIndex2 = *ci->code; + + /* Try 2 bytes + reg instruction (OCST_23BYTES). */ + if (instType == INT_LIST_GROUP) { + in = InstructionsTree[INST_NODE_INDEX(in) + ((tmpIndex2 >> 3) & 7)]; + if (in == INT_NOTEXISTS) return NULL; + instType = INST_NODE_TYPE(in); + + if (instType < INT_INFOS) + return instType == INT_INFO ? &InstInfos[INST_NODE_INDEX(in)] : (_InstInfo*)&InstInfosEx[INST_NODE_INDEX(in)]; + + /* It has to be a prefixed table then. */ + ii = inst_lookup_prefixed(in, ps); + /* RDRAND and VMPTRLD share same 2.3 bytes opcode, and alternate on the MOD bits. See insts.h for more info. */ + if ((ii != NULL) && (ii->opcodeId == I_VMPTRLD) && (tmpIndex1 >= INST_DIVIDED_MODRM)) return &II_RDRAND; + return ii; + } + + /* Try 2 bytes + divided range (OCST_2dBYTES). 
*/ + if (instType == INT_LIST_DIVIDED) { + _InstNode in2 = InstructionsTree[INST_NODE_INDEX(in) + ((tmpIndex2 >> 3) & 7)]; + /* + * Do NOT check for NULL here, since we do a bit of a guess work, + * hence we don't override 'in', cause we might still need it. + */ + instType = INST_NODE_TYPE(in2); + + if (instType == INT_INFO) ii = &InstInfos[INST_NODE_INDEX(in2)]; + else if (instType == INT_INFOEX) ii = (_InstInfo*)&InstInfosEx[INST_NODE_INDEX(in2)]; + + /* + * OCST_2dBYTES is complex, because there are a few instructions which are not divided in some special cases. + * If the instruction wasn't divided (but still it must be a 2.3 because we are in divided category) + * or it was an official 2.3 (because its index was less than 0xc0) - + * Then it means the instruction should be using the REG bits, otherwise give a chance to range 0xc0-0xff. + */ + /* If we found an instruction only by its REG bits, AND it is not divided, then return it. */ + if ((ii != NULL) && (INST_INFO_FLAGS(ii) & INST_NOT_DIVIDED)) return ii; + /* Otherwise, if the range is above 0xc0, try the special divided range (range 0x8-0xc0 is omitted). */ + if (tmpIndex2 >= INST_DIVIDED_MODRM) return inst_get_info(in, tmpIndex2 - INST_DIVIDED_MODRM + 8); + + /* It might be that we got here without touching ii in the above if statements, then it becomes an invalid instruction prolly. */ + return ii; + } + + /* Try 3 full bytes (OCST_3BYTES - no ModRM byte). */ + if (instType == INT_LIST_FULL) { + /* OCST_3BYTES. */ + in = InstructionsTree[INST_NODE_INDEX(in) + tmpIndex2]; + if (in == INT_NOTEXISTS) return NULL; + instType = INST_NODE_TYPE(in); + + if (instType < INT_INFOS) + return instType == INT_INFO ? &InstInfos[INST_NODE_INDEX(in)] : (_InstInfo*)&InstInfosEx[INST_NODE_INDEX(in)]; + + if (instType == INT_LIST_PREFIXED) return inst_lookup_prefixed(in, ps); + } + + /* Kahtchinggg, damn. */ + return NULL; +} + +/* +* 3DNow! instruction handling: + +* This is used when we encounter a 3DNow! instruction. 
+* We can't really locate a 3DNow! instruction before we see two escaped bytes, +* 0x0f, 0x0f. Then we have to extract operands which are, dest=mmx register, src=mmx register or quadword indirection. +* When we are finished with the extraction of operands we can resume to locate the instruction by reading another byte +* which tells us which 3DNow instruction we really tracked down... +* So in order to tell the extract operands function which operands the 3DNow! instruction require, we need to set up some +* generic instruction info for 3DNow! instructions. + +* In the inst_lookup itself, when we read an OCST_3BYTES which the two first bytes are 0x0f and 0x0f. +* we will return this special generic II for the specific operands we are interested in (MM, MM64). +* Then after extracting the operand, we'll call a completion routine for locating the instruction +* which will be called only for 3DNow! instructions, distinguished by a flag, and it will read the last byte of the 3 bytes. +* +* The id of this opcode should not be used, the following function should change it anyway. +*/ +_InstInfo* inst_lookup_3dnow(_CodeInfo* ci) +{ + /* Start off from the two escape bytes gates... which is 3DNow! table.*/ + _InstNode in = Table_0F_0F; + + int index; + + /* Make sure we can read a byte off the stream. */ + if (ci->codeLen < 1) return NULL; + + index = *ci->code; + + ci->codeLen -= 1; + ci->code += 1; + return inst_get_info(in, index); +} diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.h new file mode 100644 index 00000000..f6c2c53b --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/instructions.h @@ -0,0 +1,463 @@ +/* +instructions.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. 
*/


#ifndef INSTRUCTIONS_H
#define INSTRUCTIONS_H

#include "config.h"
#include "prefix.h"


/*
 * Operand type possibilities:
 * Note "_FULL" suffix indicates to decode the operand as 16 bits or 32 bits depends on DecodeType -
 * actually, it depends on the decoding mode, unless there's an operand/address size prefix.
 * For example, the code: 33 c0 could be decoded/executed as XOR AX, AX or XOR EAX, EAX.
 */
typedef enum OpType {
    /* No operand is set */
    OT_NONE = 0,

    /* Read a byte(8 bits) immediate */
    OT_IMM8,
    /* Force a read of a word(16 bits) immediate, used by ret only */
    OT_IMM16,
    /* Read a word/dword immediate */
    OT_IMM_FULL,
    /* Read a double-word(32 bits) immediate */
    OT_IMM32,

    /* Read a signed extended byte(8 bits) immediate */
    OT_SEIMM8,

    /*
     * Special immediates for instructions which have more than one immediate,
     * which is an exception from standard instruction format.
     * As to version v1.0: ENTER, INSERTQ, EXTRQ are the only problematic ones.
     */
    /* 16 bits immediate using the first imm-slot */
    OT_IMM16_1,
    /* 8 bits immediate using the first imm-slot */
    OT_IMM8_1,
    /* 8 bits immediate using the second imm-slot */
    OT_IMM8_2,

    /* Use a 8bit register */
    OT_REG8,
    /* Use a 16bit register */
    OT_REG16,
    /* Use a 16/32/64bit register */
    OT_REG_FULL,
    /* Use a 32bit register */
    OT_REG32,
    /*
     * If used with REX the reg operand size becomes 64 bits, otherwise 32 bits.
     * VMX instructions are promoted automatically without a REX prefix.
     */
    OT_REG32_64,
    /* Used only by MOV CR/DR(n). Promoted with REX only. */
    OT_FREG32_64_RM,

    /* Use or read (indirection) a 8bit register or immediate byte */
    OT_RM8,
    /* Some instructions force 16 bits (mov sreg, rm16) */
    OT_RM16,
    /* Use or read a 16/32/64bit register or immediate word/dword/qword */
    OT_RM_FULL,
    /*
     * 32 or 64 bits (with REX) operand size indirection memory operand.
     * Some instructions are promoted automatically without a REX prefix.
     */
    OT_RM32_64,
    /* 16 or 32 bits RM. This is used only with MOVSXD instruction in 64bits. */
    OT_RM16_32,
    /* Same as OT_RMXX but POINTS to 16 bits [cannot use GENERAL-PURPOSE REG!] */
    OT_FPUM16,
    /* Same as OT_RMXX but POINTS to 32 bits (single precision) [cannot use GENERAL-PURPOSE REG!] */
    OT_FPUM32,
    /* Same as OT_RMXX but POINTS to 64 bits (double precision) [cannot use GENERAL-PURPOSE REG!] */
    OT_FPUM64,
    /* Same as OT_RMXX but POINTS to 80 bits (extended precision) [cannot use GENERAL-PURPOSE REG!] */
    OT_FPUM80,

    /*
     * Special operand type for SSE4 where the ModR/M might
     * be a 32 bits register or 8 bits memory indirection operand.
     */
    OT_R32_M8,
    /*
     * Special ModR/M for PINSRW, which need a 16 bits memory operand or 32 bits register.
     * In 16 bits decoding mode R32 becomes R16, operand size cannot affect this.
     */
    OT_R32_M16,
    /*
     * Special type for SSE4, ModR/M might be a 32 bits or 64 bits (with REX) register or
     * a 8 bits memory indirection operand.
     */
    OT_R32_64_M8,
    /*
     * Special type for SSE4, ModR/M might be a 32 bits or 64 bits (with REX) register or
     * a 16 bits memory indirection operand.
     */
    OT_R32_64_M16,
    /*
     * Special operand type for MOV reg16/32/64/mem16, segReg 8C /r. and SMSW.
     * It supports all decoding modes, but if used as a memory indirection it's a 16 bit ModR/M indirection.
     */
    OT_RFULL_M16,

    /* Use a control register */
    OT_CREG,
    /* Use a debug register */
    OT_DREG,
    /* Use a segment register */
    OT_SREG,
    /*
     * SEG is encoded in the flags of the opcode itself!
     * This is used for specific "push SS" where SS is a segment where
     * each "push SS" has an absolutely different opcode byte.
     * We need this to detect whether an operand size prefix is used.
     */
    OT_SEG,

    /* Use AL */
    OT_ACC8,
    /* Use AX (FSTSW) */
    OT_ACC16,
    /* Use AX/EAX/RAX */
    OT_ACC_FULL,
    /* Use AX/EAX, no REX is possible for RAX, used only with IN/OUT which don't support 64 bit registers */
    OT_ACC_FULL_NOT64,

    /*
     * Read one word (seg), and a word/dword/qword (depends on operand size) from memory.
     * JMP FAR [EBX] means EBX point to 16:32 ptr.
     */
    OT_MEM16_FULL,
    /* Read one word (seg) and a word/dword/qword (depends on operand size), usually SEG:OFF, JMP 1234:1234 */
    OT_PTR16_FULL,
    /* Read one word (limit) and a dword/qword (limit) (depends on operand size), used by SGDT, SIDT, LGDT, LIDT. */
    OT_MEM16_3264,

    /* Read a byte(8 bits) immediate and calculate it relatively to the current offset of the instruction being decoded */
    OT_RELCB,
    /* Read a word/dword immediate and calculate it relatively to the current offset of the instruction being decoded */
    OT_RELC_FULL,

    /* Use general memory indirection, with varying sizes: */
    OT_MEM,
    /* Used when a memory indirection is required, but if the mod field is 11, this operand will be ignored. */
    OT_MEM_OPT,
    OT_MEM32,
    /* Memory dereference for MOVNTI, either 32 or 64 bits (with REX). */
    OT_MEM32_64,
    OT_MEM64,
    OT_MEM128,
    /* Used for cmpxchg8b/16b. */
    OT_MEM64_128,

    /* Read an immediate as an absolute address, size is known by instruction, used by MOV (memory offset) only */
    OT_MOFFS8,
    OT_MOFFS_FULL,
    /* Use an immediate of 1, as for SHR R/M, 1 */
    OT_CONST1,
    /* Use CL, as for SHR R/M, CL */
    OT_REGCL,

    /*
     * Instruction-Block for one byte long instructions, used by INC/DEC/PUSH/POP/XCHG,
     * REG is extracted from the value of opcode
     * Use a 8bit register
     */
    OT_IB_RB,
    /* Use a 16/32/64bit register */
    OT_IB_R_FULL,

    /* Use [(r)SI] as INDIRECTION, for repeatable instructions */
    OT_REGI_ESI,
    /* Use [(r)DI] as INDIRECTION, for repeatable instructions */
    OT_REGI_EDI,
    /* Use [(r)BX + AL] as INDIRECTION, used by XLAT only */
    OT_REGI_EBXAL,
    /* Use [(r)AX] as INDIRECTION, used by AMD's SVM instructions */
    OT_REGI_EAX,
    /* Use DX, as for OUTS DX, BYTE [SI] */
    OT_REGDX,
    /* Use ECX in INVLPGA instruction */
    OT_REGECX,

    /* FPU registers: */
    OT_FPU_SI, /* ST(i) */
    OT_FPU_SSI, /* ST(0), ST(i) */
    OT_FPU_SIS, /* ST(i), ST(0) */

    /* MMX registers: */
    OT_MM,
    /* Extract the MMX register from the RM bits this time (used when the REG bits are used for opcode extension) */
    OT_MM_RM,
    /* ModR/M points to 32 bits MMX variable */
    OT_MM32,
    /* ModR/M points to 64 bits MMX variable */
    OT_MM64,

    /* SSE registers: */
    OT_XMM,
    /* Extract the SSE register from the RM bits this time (used when the REG bits are used for opcode extension) */
    OT_XMM_RM,
    /* ModR/M points to 16 bits SSE variable */
    OT_XMM16,
    /* ModR/M points to 32 bits SSE variable */
    OT_XMM32,
    /* ModR/M points to 64 bits SSE variable */
    OT_XMM64,
    /* ModR/M points to 128 bits SSE variable */
    OT_XMM128,
    /* Implied XMM0 register as operand, used in SSE4. */
    OT_REGXMM0,

    /* AVX operands: */

    /* ModR/M for 32 bits. */
    OT_RM32,
    /* Reg32/Reg64 (prefix width) or Mem8. */
    OT_REG32_64_M8,
    /* Reg32/Reg64 (prefix width) or Mem16. */
    OT_REG32_64_M16,
    /* Reg32/Reg 64 depends on prefix width only. */
    OT_WREG32_64,
    /* RM32/RM64 depends on prefix width only. */
    OT_WRM32_64,
    /* XMM or Mem32/Mem64 depends on prefix width only. */
    OT_WXMM32_64,
    /* XMM is encoded in VEX.VVVV. */
    OT_VXMM,
    /* XMM is encoded in the high nibble of an immediate byte. */
    OT_XMM_IMM,
    /* YMM/XMM is dependent on VEX.L. */
    OT_YXMM,
    /* YMM/XMM (depends on prefix length) is encoded in the high nibble of an immediate byte. */
    OT_YXMM_IMM,
    /* YMM is encoded in reg. */
    OT_YMM,
    /* YMM or Mem256. */
    OT_YMM256,
    /* YMM is encoded in VEX.VVVV. */
    OT_VYMM,
    /* YMM/XMM is dependent on VEX.L, and encoded in VEX.VVVV. */
    OT_VYXMM,
    /* YMM/XMM or Mem64/Mem256 is dependent on VEX.L. */
    OT_YXMM64_256,
    /* YMM/XMM or Mem128/Mem256 is dependent on VEX.L. */
    OT_YXMM128_256,
    /* XMM or Mem64/Mem256 is dependent on VEX.L. */
    OT_LXMM64_128,
    /* Mem128/Mem256 is dependent on VEX.L. */
    OT_LMEM128_256
} _OpType;

/* Flags for instruction: */

/* Empty flags indicator: */
#define INST_FLAGS_NONE (0)
/* The instruction we are going to decode requires ModR/M encoding. */
#define INST_MODRM_REQUIRED (1)
/* Special treatment for instructions which are in the divided-category but still needs the whole byte for ModR/M... */
#define INST_NOT_DIVIDED (1 << 1)
/*
 * Used explicitly in repeatable instructions,
 * which needs a suffix letter in their mnemonic to specify operation-size (depend on operands).
 */
#define INST_16BITS (1 << 2)
/* If the opcode is supported by 80286 and upper models (16/32 bits). */
#define INST_32BITS (1 << 3)
/*
 * Prefix flags (6 types: lock/rep, seg override, addr-size, oper-size, REX, VEX)
 * There are several specific instructions that can follow LOCK prefix,
 * note that they must be using a memory operand form, otherwise they generate an exception.
 */
#define INST_PRE_LOCK (1 << 4)
/* REPNZ prefix for string instructions only - means an instruction can follow it. */
#define INST_PRE_REPNZ (1 << 5)
/* REP prefix for string instructions only - means an instruction can follow it. */
#define INST_PRE_REP (1 << 6)
/* CS override prefix. */
#define INST_PRE_CS (1 << 7)
/* SS override prefix. */
#define INST_PRE_SS (1 << 8)
/* DS override prefix. */
#define INST_PRE_DS (1 << 9)
/* ES override prefix. */
#define INST_PRE_ES (1 << 10)
/* FS override prefix. Funky Segment :) */
#define INST_PRE_FS (1 << 11)
/* GS override prefix. Groovy Segment, of course not, duh ! */
#define INST_PRE_GS (1 << 12)
/* Switch operand size from 32 to 16 and vice versa. */
#define INST_PRE_OP_SIZE (1 << 13)
/* Switch address size from 32 to 16 and vice versa. */
#define INST_PRE_ADDR_SIZE (1 << 14)
/* Native instructions which needs suffix letter to indicate their operation-size (and don't depend on operands). */
#define INST_NATIVE (1 << 15)
/* Use extended mnemonic, means it's an _InstInfoEx structure, which contains another mnemonic for 32 bits specifically. */
#define INST_USE_EXMNEMONIC (1 << 16)
/* Use third operand, means it's an _InstInfoEx structure, which contains another operand for special instructions. */
#define INST_USE_OP3 (1 << 17)
/* Use fourth operand, means it's an _InstInfoEx structure, which contains another operand for special instructions. */
#define INST_USE_OP4 (1 << 18)
/* The instruction's mnemonic depends on the mod value of the ModR/M byte (mod=11, mod!=11). */
#define INST_MNEMONIC_MODRM_BASED (1 << 19)
/* The instruction uses a ModR/M byte which the MOD must be 11 (for registers operands only). */
#define INST_MODRR_REQUIRED (1 << 20)
/* The way of 3DNow! instructions are built, we have to handle their locating specially. Suffix imm8 tells which instruction it is. */
#define INST_3DNOW_FETCH (1 << 21)
/* The instruction needs two suffixes, one for the comparison type (imm8) and the second for its operation size indication (second mnemonic). */
#define INST_PSEUDO_OPCODE (1 << 22)
/* Invalid instruction at 64 bits decoding mode. */
#define INST_INVALID_64BITS (1 << 23)
/* Specific instruction can be promoted to 64 bits (without REX, it is promoted automatically). */
#define INST_64BITS (1 << 24)
/* Indicates the instruction must be REX prefixed in order to use 64 bits operands. */
#define INST_PRE_REX (1 << 25)
/* Third mnemonic is set. */
#define INST_USE_EXMNEMONIC2 (1 << 26)
/* Instruction is only valid in 64 bits decoding mode. */
#define INST_64BITS_FETCH (1 << 27)
/* Forces that the ModRM-REG/Opcode field will be 0. (For EXTRQ). */
#define INST_FORCE_REG0 (1 << 28)
/* Indicates that instruction is encoded with a VEX prefix. */
#define INST_PRE_VEX (1 << 29)
/* Indicates that the instruction is encoded with a ModRM byte (REG field specifically). */
#define INST_MODRM_INCLUDED (1 << 30)
/* Indicates that the first (/destination) operand of the instruction is writable. */
#define INST_DST_WR (1 << 31)

/* Convenience masks combining the single-bit prefix flags above. */
#define INST_PRE_REPS (INST_PRE_REPNZ | INST_PRE_REP)
#define INST_PRE_LOKREP_MASK (INST_PRE_LOCK | INST_PRE_REPNZ | INST_PRE_REP)
#define INST_PRE_SEGOVRD_MASK32 (INST_PRE_CS | INST_PRE_SS | INST_PRE_DS | INST_PRE_ES)
#define INST_PRE_SEGOVRD_MASK64 (INST_PRE_FS | INST_PRE_GS)
#define INST_PRE_SEGOVRD_MASK (INST_PRE_SEGOVRD_MASK32 | INST_PRE_SEGOVRD_MASK64)

/* Extended flags for VEX: */
/* Indicates that the instruction might have VEX.L encoded. */
#define INST_VEX_L (1)
/* Indicates that the instruction might have VEX.W encoded. */
#define INST_VEX_W (1 << 1)
/* Indicates that the mnemonic of the instruction is based on the VEX.W bit. */
#define INST_MNEMONIC_VEXW_BASED (1 << 2)
/* Indicates that the mnemonic of the instruction is based on the VEX.L bit. */
#define INST_MNEMONIC_VEXL_BASED (1 << 3)
/* Forces the instruction to be encoded with VEX.L, otherwise it's undefined. */
#define INST_FORCE_VEXL (1 << 4)
/*
 * Indicates that the instruction is based on the MOD field of the ModRM byte.
 * (MOD==11: got the right instruction, else skip +4 in prefixed table for the correct instruction).
 */
#define INST_MODRR_BASED (1 << 5)
/* Indicates that the instruction doesn't use the VVVV field of the VEX prefix, if it does then it's undecodable. */
#define INST_VEX_V_UNUSED (1 << 6)

/* Indication that the instruction is privileged (Ring 0), this should be checked on the opcodeId field. */
#define OPCODE_ID_PRIVILEGED ((uint16_t)0x8000)

/*
 * Indicates which operand is being decoded.
 * Destination (1st), Source (2nd), op3 (3rd), op4 (4th).
 * Used to set the operands' fields in the _DInst structure!
 */
typedef enum {ONT_NONE = -1, ONT_1 = 0, ONT_2 = 1, ONT_3 = 2, ONT_4 = 3} _OperandNumberType;

/* CPU Flags that instructions modify, test or undefine, in compacted form (CF,PF,AF,ZF,SF are 1:1 map to EFLAGS). */
#define D_COMPACT_CF 1 /* Carry */
#define D_COMPACT_PF 4 /* Parity */
#define D_COMPACT_AF 0x10 /* Auxiliary */
#define D_COMPACT_ZF 0x40 /* Zero */
#define D_COMPACT_SF 0x80 /* Sign */
/* The following flags have to be translated to EFLAGS. */
#define D_COMPACT_IF 2 /* Interrupt */
#define D_COMPACT_DF 8 /* Direction */
#define D_COMPACT_OF 0x20 /* Overflow */

/* The mask of flags that are already compatible with EFLAGS. */
#define D_COMPACT_SAME_FLAGS (D_COMPACT_CF | D_COMPACT_PF | D_COMPACT_AF | D_COMPACT_ZF | D_COMPACT_SF)

/*
 * In order to save more space for storing the DB statically,
 * I came up with another level of shared info.
 * Because I saw that most of the information that instructions use repeats itself.
 *
 * Info about the instruction, source/dest types, meta and flags.
 * _InstInfo points to a table of _InstSharedInfo.
 */
typedef struct {
    uint8_t flagsIndex; /* An index into FlagsTables */
    uint8_t s, d; /* OpType. */
    uint8_t meta; /* Hi 5 bits = Instruction set class | Lo 3 bits = flow control flags. */
    /*
     * The following are CPU flag masks that the instruction changes.
     * The flags are compacted so 8 bits representation is enough.
     * They will be expanded in runtime to be compatible to EFLAGS.
     */
    uint8_t modifiedFlagsMask;
    uint8_t testedFlagsMask;
    uint8_t undefinedFlagsMask;
} _InstSharedInfo;

/*
 * This structure is used for the instructions DB and NOT for the disassembled result code!
 * This is the BASE structure, there are extensions to this structure below.
 */
typedef struct {
    uint16_t sharedIndex; /* An index into the SharedInfoTable. */
    uint16_t opcodeId; /* The opcodeId is really a byte-offset into the mnemonics table. MSB is a privileged indication. */
} _InstInfo;

/*
 * There are merely few instructions which need a second mnemonic for 32 bits.
 * Or a third for 64 bits. Therefore sometimes the second mnemonic is empty but not the third.
 * In all decoding modes the first mnemonic is the default.
 * A flag will indicate it uses another mnemonic.
 *
 * There are a couple of (SSE4) instructions in the whole DB which need both op3 and 3rd mnemonic for 64bits,
 * therefore, I decided to make the extended structure contain all extra info in the same structure.
 * There are a few instructions (SHLD/SHRD/IMUL and SSE too) which use third operand (or a fourth).
 * A flag will indicate it uses a third/fourth operand.
 */
typedef struct {
    /* Base structure (doesn't get accessed directly from code). */
    _InstInfo BASE;

    /* Extended starts here. */
    uint8_t flagsEx; /* 8 bits are enough, in the future we might make it a bigger integer. */
    uint8_t op3, op4; /* OpType. */
    uint16_t opcodeId2, opcodeId3;
} _InstInfoEx;

/* Trie data structure node type: */
typedef enum {
    INT_NOTEXISTS = 0, /* Not exists. */
    INT_INFO = 1, /* It's an instruction info. */
    INT_INFOEX,
    INT_LIST_GROUP,
    INT_LIST_FULL,
    INT_LIST_DIVIDED,
    INT_LIST_PREFIXED
} _InstNodeType;

/* Used to check instType < INT_INFOS, means we got an inst-info. Cause it has to be only one of them. */
#define INT_INFOS (INT_LIST_GROUP)

/* Instruction node is treated as { int index:13; int type:3; } */
typedef uint16_t _InstNode;

_InstInfo* inst_lookup(_CodeInfo* ci, _PrefixState* ps);
_InstInfo* inst_lookup_3dnow(_CodeInfo* ci);

#endif /* INSTRUCTIONS_H */
diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/insts.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/insts.c
new file mode 100644
index 00000000..f7d283e5
--- /dev/null
+++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/insts.c
@@ -0,0 +1,7940 @@
/*
insts.c

diStorm3 - Powerful disassembler for X86/AMD64
http://ragestorm.net/distorm/
distorm at gmail dot com
Copyright (C) 2003-2018 Gil Dabah
This library is licensed under the BSD license. See the file COPYING.
+*/ + + +#include "config.h" +#include "insts.h" +#include "instructions.h" + + +/* + * GENERATED BY disOps at Sat Nov 10 19:20:27 2018 + */ + +_InstInfo II_MOVSXD = /*II*/ {0x1d4, 10027}; +_InstInfo II_NOP = /*II*/ {0x53, 581}; +_InstInfo II_PAUSE = /*II*/ {0x88, 10035}; +_InstInfo II_WAIT = /*II*/ {0x53, 10042}; +_InstInfo II_RDRAND = /*II*/ {0x1d5, 10048}; +_InstInfo II_3DNOW = /*II*/ {0x1d6, 10056}; + +_iflags FlagsTable[101] = { +0x80000011, +0x80000000, +0x800400, +0x80800400, +0x800080, +0x800100, +0x80800100, +0x800200, +0x80800200, +0x800000, +0x1, +0x0, +0x80800000, +0x1000000, +0x81000000, +0x808000, +0x800001, +0x80020001, +0x1002000, +0x60, +0x64, +0x80000001, +0x4010000, +0x1008000, +0x80000060, +0x83000064, +0x3000064, +0x83000000, +0x3008000, +0x200, +0xc000, +0x4014000, +0x8, +0x81000009, +0x9, +0x80000009, +0x1000808, +0x81000808, +0x80020009, +0x1001008, +0x81001008, +0x80000019, +0x3000009, +0x83000009, +0x83000008, +0xc0000011, +0x40000001, +0xc0800011, +0x40800001, +0xc0000019, +0xc1000001, +0xc0000001, +0xc0000003, +0x41000000, +0x40000000, +0x40000008, +0x40000009, +0x41000001, +0x43000001, +0x40000003, +0x48000000, +0x200009, +0x20000009, +0x60020009, +0x60000009, +0x80090009, +0x200b0009, +0x20020009, +0x80100009, +0x21100009, +0x87000009, +0x20009, +0x20000008, +0x1000009, +0x10020009, +0x160009, +0x100009, +0x47000009, +0x47090009, +0x40090009, +0x80002009, +0xc0000009, +0x2001, +0x80002001, +0x410009, +0x20420009, +0x20060009, +0x120009, +0x21020009, +0xc7000019, +0x20100009, +0xc0002009, +0x40002008, +0xc0000000, +0xc0002008, +0x4020009, +0x40100009, +0x60120009, +0x41000009, +0x83000001, +0x200001 +}; + +_InstNode Table_0F = 256; +_InstNode Table_0F_0F = 1440; +_InstNode Table_0F_38 = 1896; +_InstNode Table_0F_3A = 2152; + +_InstInfo InstInfos[1246] = { + /*II_00*/ {0x0, 11}, + /*II_01*/ {0x1, 11}, + /*II_02*/ {0x2, 11}, + /*II_03*/ {0x3, 11}, + /*II_04*/ {0x4, 11}, + /*II_05*/ {0x5, 11}, + /*II_06*/ {0x6, 16}, + /*II_07*/ {0x7, 22}, 
+ /*II_08*/ {0x8, 27}, + /*II_09*/ {0x9, 27}, + /*II_0A*/ {0xa, 27}, + /*II_0B*/ {0xb, 27}, + /*II_0C*/ {0xc, 27}, + /*II_0D*/ {0xd, 27}, + /*II_0E*/ {0xe, 16}, + /*II_10*/ {0xf, 31}, + /*II_11*/ {0x10, 31}, + /*II_12*/ {0x11, 31}, + /*II_13*/ {0x12, 31}, + /*II_14*/ {0x13, 31}, + /*II_15*/ {0x14, 31}, + /*II_16*/ {0x15, 16}, + /*II_17*/ {0x16, 22}, + /*II_18*/ {0xf, 36}, + /*II_19*/ {0x10, 36}, + /*II_1A*/ {0x11, 36}, + /*II_1B*/ {0x12, 36}, + /*II_1C*/ {0x13, 36}, + /*II_1D*/ {0x14, 36}, + /*II_1E*/ {0x17, 16}, + /*II_1F*/ {0x18, 22}, + /*II_20*/ {0x19, 41}, + /*II_21*/ {0x1a, 41}, + /*II_22*/ {0x1b, 41}, + /*II_23*/ {0x1c, 41}, + /*II_24*/ {0x1d, 41}, + /*II_25*/ {0x1e, 41}, + /*II_27*/ {0x1f, 46}, + /*II_28*/ {0x0, 51}, + /*II_29*/ {0x1, 51}, + /*II_2A*/ {0x2, 51}, + /*II_2B*/ {0x3, 51}, + /*II_2C*/ {0x4, 51}, + /*II_2D*/ {0x5, 51}, + /*II_2F*/ {0x1f, 56}, + /*II_30*/ {0x20, 61}, + /*II_31*/ {0x21, 61}, + /*II_32*/ {0x22, 61}, + /*II_33*/ {0x23, 61}, + /*II_34*/ {0x24, 61}, + /*II_35*/ {0x25, 61}, + /*II_37*/ {0x26, 66}, + /*II_38*/ {0x27, 71}, + /*II_39*/ {0x28, 71}, + /*II_3A*/ {0x29, 71}, + /*II_3B*/ {0x2a, 71}, + /*II_3C*/ {0x2b, 71}, + /*II_3D*/ {0x2c, 71}, + /*II_3F*/ {0x26, 76}, + /*II_40*/ {0x2d, 81}, + /*II_40*/ {0x2d, 81}, + /*II_40*/ {0x2d, 81}, + /*II_40*/ {0x2d, 81}, + /*II_40*/ {0x2d, 81}, + /*II_40*/ {0x2d, 81}, + /*II_40*/ {0x2d, 81}, + /*II_40*/ {0x2d, 81}, + /*II_48*/ {0x2d, 86}, + /*II_48*/ {0x2d, 86}, + /*II_48*/ {0x2d, 86}, + /*II_48*/ {0x2d, 86}, + /*II_48*/ {0x2d, 86}, + /*II_48*/ {0x2d, 86}, + /*II_48*/ {0x2d, 86}, + /*II_48*/ {0x2d, 86}, + /*II_50*/ {0x2e, 16}, + /*II_50*/ {0x2e, 16}, + /*II_50*/ {0x2e, 16}, + /*II_50*/ {0x2e, 16}, + /*II_50*/ {0x2e, 16}, + /*II_50*/ {0x2e, 16}, + /*II_50*/ {0x2e, 16}, + /*II_50*/ {0x2e, 16}, + /*II_58*/ {0x2f, 22}, + /*II_58*/ {0x2f, 22}, + /*II_58*/ {0x2f, 22}, + /*II_58*/ {0x2f, 22}, + /*II_58*/ {0x2f, 22}, + /*II_58*/ {0x2f, 22}, + /*II_58*/ {0x2f, 22}, + /*II_58*/ {0x2f, 22}, + /*II_60*/ {0x30, 
91}, + /*II_61*/ {0x30, 98}, + /*II_62*/ {0x31, 104}, + /*II_63*/ {0x32, 111}, + /*II_68*/ {0x33, 16}, + /*II_6A*/ {0x35, 16}, + /*II_6C*/ {0x36, 32891}, + /*II_6D*/ {0x37, 32891}, + /*II_6E*/ {0x38, 32896}, + /*II_6F*/ {0x39, 32896}, + /*II_70*/ {0x3a, 134}, + /*II_71*/ {0x3a, 138}, + /*II_72*/ {0x3b, 143}, + /*II_73*/ {0x3b, 147}, + /*II_74*/ {0x3c, 152}, + /*II_75*/ {0x3c, 156}, + /*II_76*/ {0x3d, 161}, + /*II_77*/ {0x3d, 166}, + /*II_78*/ {0x3e, 170}, + /*II_79*/ {0x3e, 174}, + /*II_7A*/ {0x3f, 179}, + /*II_7B*/ {0x3f, 183}, + /*II_7C*/ {0x40, 188}, + /*II_7D*/ {0x40, 192}, + /*II_7E*/ {0x41, 197}, + /*II_7F*/ {0x41, 202}, + /*II_84*/ {0x42, 206}, + /*II_85*/ {0x43, 206}, + /*II_86*/ {0x44, 212}, + /*II_87*/ {0x45, 212}, + /*II_88*/ {0x46, 218}, + /*II_89*/ {0x47, 218}, + /*II_8A*/ {0x48, 218}, + /*II_8B*/ {0x49, 218}, + /*II_8C*/ {0x4a, 218}, + /*II_8D*/ {0x4b, 223}, + /*II_8E*/ {0x4c, 218}, + /*II_90*/ {0x4d, 212}, + /*II_91*/ {0x4d, 212}, + /*II_92*/ {0x4d, 212}, + /*II_93*/ {0x4d, 212}, + /*II_94*/ {0x4d, 212}, + /*II_95*/ {0x4d, 212}, + /*II_96*/ {0x4d, 212}, + /*II_97*/ {0x4d, 212}, + /*II_9A*/ {0x4f, 260}, + /*II_9C*/ {0x50, 270}, + /*II_9D*/ {0x51, 277}, + /*II_9E*/ {0x52, 283}, + /*II_9F*/ {0x53, 289}, + /*II_A0*/ {0x54, 218}, + /*II_A1*/ {0x55, 218}, + /*II_A2*/ {0x56, 218}, + /*II_A3*/ {0x57, 218}, + /*II_A4*/ {0x58, 295}, + /*II_A5*/ {0x59, 295}, + /*II_A6*/ {0x5a, 301}, + /*II_A7*/ {0x5b, 301}, + /*II_A8*/ {0x5c, 206}, + /*II_A9*/ {0x5d, 206}, + /*II_AA*/ {0x5e, 307}, + /*II_AB*/ {0x5f, 307}, + /*II_AC*/ {0x60, 313}, + /*II_AD*/ {0x61, 313}, + /*II_AE*/ {0x62, 319}, + /*II_AF*/ {0x63, 319}, + /*II_B0*/ {0x64, 218}, + /*II_B0*/ {0x64, 218}, + /*II_B0*/ {0x64, 218}, + /*II_B0*/ {0x64, 218}, + /*II_B0*/ {0x64, 218}, + /*II_B0*/ {0x64, 218}, + /*II_B0*/ {0x64, 218}, + /*II_B0*/ {0x64, 218}, + /*II_B8*/ {0x65, 218}, + /*II_B8*/ {0x65, 218}, + /*II_B8*/ {0x65, 218}, + /*II_B8*/ {0x65, 218}, + /*II_B8*/ {0x65, 218}, + /*II_B8*/ {0x65, 218}, + /*II_B8*/ 
{0x65, 218}, + /*II_B8*/ {0x65, 218}, + /*II_C2*/ {0x66, 325}, + /*II_C3*/ {0x67, 325}, + /*II_C4*/ {0x68, 330}, + /*II_C5*/ {0x68, 335}, + /*II_C8*/ {0x69, 340}, + /*II_C9*/ {0x6a, 347}, + /*II_CA*/ {0x6b, 354}, + /*II_CB*/ {0x6c, 354}, + /*II_CC*/ {0x6d, 360}, + /*II_CD*/ {0x6e, 367}, + /*II_CE*/ {0x6f, 372}, + /*II_CF*/ {0x70, 33146}, + /*II_D4*/ {0x71, 384}, + /*II_D5*/ {0x71, 389}, + /*II_D6*/ {0x72, 394}, + /*II_D7*/ {0x73, 400}, + /*II_E0*/ {0x74, 406}, + /*II_E1*/ {0x74, 414}, + /*II_E2*/ {0x75, 421}, + /*II_E4*/ {0x77, 33215}, + /*II_E5*/ {0x78, 33215}, + /*II_E6*/ {0x79, 33219}, + /*II_E7*/ {0x7a, 33219}, + /*II_E8*/ {0x7b, 456}, + /*II_E9*/ {0x7c, 462}, + /*II_EA*/ {0x7d, 467}, + /*II_EB*/ {0x7e, 462}, + /*II_EC*/ {0x7f, 33215}, + /*II_ED*/ {0x80, 33215}, + /*II_EE*/ {0x81, 33219}, + /*II_EF*/ {0x82, 33219}, + /*II_F1*/ {0x6d, 476}, + /*II_F4*/ {0x53, 33250}, + /*II_F5*/ {0x83, 487}, + /*II_F8*/ {0x83, 492}, + /*II_F9*/ {0x83, 497}, + /*II_FA*/ {0x84, 33270}, + /*II_FB*/ {0x84, 33275}, + /*II_FC*/ {0x85, 512}, + /*II_FD*/ {0x85, 517}, + /*II_0F_02*/ {0x86, 522}, + /*II_0F_03*/ {0x86, 527}, + /*II_0F_05*/ {0x87, 532}, + /*II_0F_06*/ {0x88, 33309}, + /*II_0F_07*/ {0x87, 547}, + /*II_0F_08*/ {0x88, 33323}, + /*II_0F_09*/ {0x88, 33329}, + /*II_0F_0B*/ {0x89, 569}, + /*II_0F_0E*/ {0x8a, 574}, + /*II_0F_1F*/ {0x8b, 581}, + /*II_0F_20*/ {0x8c, 32986}, + /*II_0F_21*/ {0x8d, 32986}, + /*II_0F_22*/ {0x8e, 32986}, + /*II_0F_23*/ {0x8f, 32986}, + /*II_0F_30*/ {0x88, 33354}, + /*II_0F_31*/ {0x88, 33361}, + /*II_0F_32*/ {0x88, 33368}, + /*II_0F_33*/ {0x88, 33375}, + /*II_0F_34*/ {0x87, 614}, + /*II_0F_35*/ {0x87, 624}, + /*II_0F_37*/ {0x90, 633}, + /*II_0F_40*/ {0x91, 641}, + /*II_0F_41*/ {0x91, 648}, + /*II_0F_42*/ {0x92, 656}, + /*II_0F_43*/ {0x92, 663}, + /*II_0F_44*/ {0x93, 671}, + /*II_0F_45*/ {0x93, 678}, + /*II_0F_46*/ {0x94, 686}, + /*II_0F_47*/ {0x94, 694}, + /*II_0F_48*/ {0x95, 701}, + /*II_0F_49*/ {0x95, 708}, + /*II_0F_4A*/ {0x96, 716}, + /*II_0F_4B*/ 
{0x96, 723}, + /*II_0F_4C*/ {0x97, 731}, + /*II_0F_4D*/ {0x97, 738}, + /*II_0F_4E*/ {0x98, 746}, + /*II_0F_4F*/ {0x98, 754}, + /*II_0F_80*/ {0x99, 134}, + /*II_0F_81*/ {0x99, 138}, + /*II_0F_82*/ {0x9a, 143}, + /*II_0F_83*/ {0x9a, 147}, + /*II_0F_84*/ {0x9b, 152}, + /*II_0F_85*/ {0x9b, 156}, + /*II_0F_86*/ {0x9c, 161}, + /*II_0F_87*/ {0x9c, 166}, + /*II_0F_88*/ {0x9d, 170}, + /*II_0F_89*/ {0x9d, 174}, + /*II_0F_8A*/ {0x9e, 179}, + /*II_0F_8B*/ {0x9e, 183}, + /*II_0F_8C*/ {0x9f, 188}, + /*II_0F_8D*/ {0x9f, 192}, + /*II_0F_8E*/ {0xa0, 197}, + /*II_0F_8F*/ {0xa0, 202}, + /*II_0F_90*/ {0xa1, 761}, + /*II_0F_91*/ {0xa1, 767}, + /*II_0F_92*/ {0xa2, 774}, + /*II_0F_93*/ {0xa2, 780}, + /*II_0F_94*/ {0xa3, 787}, + /*II_0F_95*/ {0xa3, 793}, + /*II_0F_96*/ {0xa4, 800}, + /*II_0F_97*/ {0xa4, 807}, + /*II_0F_98*/ {0xa5, 813}, + /*II_0F_99*/ {0xa5, 819}, + /*II_0F_9A*/ {0xa6, 826}, + /*II_0F_9B*/ {0xa6, 832}, + /*II_0F_9C*/ {0xa7, 839}, + /*II_0F_9D*/ {0xa7, 845}, + /*II_0F_9E*/ {0xa8, 852}, + /*II_0F_9F*/ {0xa8, 859}, + /*II_0F_A0*/ {0xa9, 16}, + /*II_0F_A1*/ {0xaa, 22}, + /*II_0F_A2*/ {0x88, 865}, + /*II_0F_A3*/ {0xab, 872}, + /*II_0F_A8*/ {0xad, 16}, + /*II_0F_A9*/ {0xae, 22}, + /*II_0F_AA*/ {0xaf, 882}, + /*II_0F_AB*/ {0xb0, 887}, + /*II_0F_AF*/ {0xb1, 117}, + /*II_0F_B0*/ {0xb2, 898}, + /*II_0F_B1*/ {0xb3, 898}, + /*II_0F_B2*/ {0xb4, 907}, + /*II_0F_B3*/ {0xb0, 912}, + /*II_0F_B4*/ {0xb4, 917}, + /*II_0F_B5*/ {0xb4, 922}, + /*II_0F_B6*/ {0xb5, 927}, + /*II_0F_B7*/ {0xb6, 927}, + /*II_0F_B9*/ {0x89, 569}, + /*II_0F_BB*/ {0xb0, 934}, + /*II_0F_BE*/ {0xb5, 939}, + /*II_0F_BF*/ {0xb6, 939}, + /*II_0F_C0*/ {0xb2, 946}, + /*II_0F_C1*/ {0xb3, 946}, + /*II_0F_C3*/ {0xb7, 952}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_0F_C8*/ {0xb8, 960}, + /*II_80_00*/ {0xb9, 11}, + /*II_80_01*/ {0xba, 27}, + /*II_80_02*/ {0xbb, 31}, + 
/*II_80_03*/ {0xbb, 36}, + /*II_80_04*/ {0xbc, 41}, + /*II_80_05*/ {0xb9, 51}, + /*II_80_06*/ {0xbd, 61}, + /*II_80_07*/ {0xbe, 71}, + /*II_81_00*/ {0xbf, 11}, + /*II_81_01*/ {0xc0, 27}, + /*II_81_02*/ {0xc1, 31}, + /*II_81_03*/ {0xc1, 36}, + /*II_81_04*/ {0xc2, 41}, + /*II_81_05*/ {0xbf, 51}, + /*II_81_06*/ {0xc3, 61}, + /*II_81_07*/ {0xc4, 71}, + /*II_82_00*/ {0xc5, 11}, + /*II_82_01*/ {0xc6, 27}, + /*II_82_02*/ {0xc7, 31}, + /*II_82_03*/ {0xc7, 36}, + /*II_82_04*/ {0xc8, 41}, + /*II_82_05*/ {0xc5, 51}, + /*II_82_06*/ {0xc9, 61}, + /*II_82_07*/ {0xca, 71}, + /*II_83_00*/ {0xcb, 11}, + /*II_83_01*/ {0xcc, 27}, + /*II_83_02*/ {0xcd, 31}, + /*II_83_03*/ {0xcd, 36}, + /*II_83_04*/ {0xce, 41}, + /*II_83_05*/ {0xcb, 51}, + /*II_83_06*/ {0xcf, 61}, + /*II_83_07*/ {0xd0, 71}, + /*II_8F_00*/ {0xd1, 22}, + /*II_C0_00*/ {0xd2, 967}, + /*II_C0_01*/ {0xd2, 972}, + /*II_C0_02*/ {0xd3, 977}, + /*II_C0_03*/ {0xd3, 982}, + /*II_C0_04*/ {0xd4, 987}, + /*II_C0_05*/ {0xd4, 992}, + /*II_C0_06*/ {0xd4, 997}, + /*II_C0_07*/ {0xd4, 1002}, + /*II_C1_00*/ {0xd5, 967}, + /*II_C1_01*/ {0xd5, 972}, + /*II_C1_02*/ {0xd6, 977}, + /*II_C1_03*/ {0xd6, 982}, + /*II_C1_04*/ {0xd7, 987}, + /*II_C1_05*/ {0xd7, 992}, + /*II_C1_06*/ {0xd7, 997}, + /*II_C1_07*/ {0xd7, 1002}, + /*II_C6_00*/ {0xd8, 218}, + /*II_C6_F8*/ {0xd9, 1007}, + /*II_C7_00*/ {0xda, 218}, + /*II_C7_F8*/ {0xdb, 1015}, + /*II_D0_00*/ {0xdc, 967}, + /*II_D0_01*/ {0xdc, 972}, + /*II_D0_02*/ {0xdd, 977}, + /*II_D0_03*/ {0xdd, 982}, + /*II_D0_04*/ {0xde, 987}, + /*II_D0_05*/ {0xde, 992}, + /*II_D0_06*/ {0xde, 997}, + /*II_D0_07*/ {0xde, 1002}, + /*II_D1_00*/ {0xdf, 967}, + /*II_D1_01*/ {0xdf, 972}, + /*II_D1_02*/ {0xe0, 977}, + /*II_D1_03*/ {0xe0, 982}, + /*II_D1_04*/ {0xe1, 987}, + /*II_D1_05*/ {0xe1, 992}, + /*II_D1_06*/ {0xe1, 997}, + /*II_D1_07*/ {0xe1, 1002}, + /*II_D2_00*/ {0xe2, 967}, + /*II_D2_01*/ {0xe2, 972}, + /*II_D2_02*/ {0xe3, 977}, + /*II_D2_03*/ {0xe3, 982}, + /*II_D2_04*/ {0xe4, 987}, + /*II_D2_05*/ {0xe4, 992}, + 
/*II_D2_06*/ {0xe4, 997}, + /*II_D2_07*/ {0xe4, 1002}, + /*II_D3_00*/ {0xe5, 967}, + /*II_D3_01*/ {0xe5, 972}, + /*II_D3_02*/ {0xe6, 977}, + /*II_D3_03*/ {0xe6, 982}, + /*II_D3_04*/ {0xe7, 987}, + /*II_D3_05*/ {0xe7, 992}, + /*II_D3_06*/ {0xe7, 997}, + /*II_D3_07*/ {0xe7, 1002}, + /*II_D8_00*/ {0xe8, 1023}, + /*II_D8_01*/ {0xe8, 1029}, + /*II_D8_02*/ {0xe8, 1035}, + /*II_D8_03*/ {0xe8, 1041}, + /*II_D8_04*/ {0xe8, 1048}, + /*II_D8_05*/ {0xe8, 1054}, + /*II_D8_06*/ {0xe8, 1061}, + /*II_D8_07*/ {0xe8, 1067}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C0*/ {0xe9, 1023}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_C8*/ {0xe9, 1029}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D0*/ {0xea, 1035}, + /*II_D8_D8*/ {0xea, 1041}, + /*II_D8_D9*/ {0xeb, 1041}, + /*II_D8_D8*/ {0xea, 1041}, + /*II_D8_D8*/ {0xea, 1041}, + /*II_D8_D8*/ {0xea, 1041}, + /*II_D8_D8*/ {0xea, 1041}, + /*II_D8_D8*/ {0xea, 1041}, + /*II_D8_D8*/ {0xea, 1041}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E0*/ {0xe9, 1048}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_E8*/ {0xe9, 1054}, + /*II_D8_F0*/ {0xe9, 1061}, + /*II_D8_F0*/ {0xe9, 1061}, + /*II_D8_F0*/ {0xe9, 1061}, + 
/*II_D8_F0*/ {0xe9, 1061}, + /*II_D8_F0*/ {0xe9, 1061}, + /*II_D8_F0*/ {0xe9, 1061}, + /*II_D8_F0*/ {0xe9, 1061}, + /*II_D8_F0*/ {0xe9, 1061}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D8_F8*/ {0xe9, 1067}, + /*II_D9_00*/ {0xe8, 1074}, + /*II_D9_02*/ {0xec, 1079}, + /*II_D9_03*/ {0xec, 1084}, + /*II_D9_04*/ {0xed, 1090}, + /*II_D9_05*/ {0xee, 1098}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C0*/ {0xea, 1074}, + /*II_D9_C8*/ {0xea, 1105}, + /*II_D9_C9*/ {0xeb, 1105}, + /*II_D9_C8*/ {0xea, 1105}, + /*II_D9_C8*/ {0xea, 1105}, + /*II_D9_C8*/ {0xea, 1105}, + /*II_D9_C8*/ {0xea, 1105}, + /*II_D9_C8*/ {0xea, 1105}, + /*II_D9_C8*/ {0xea, 1105}, + /*II_D9_D0*/ {0xeb, 1111}, + /*II_D9_E0*/ {0xeb, 1117}, + /*II_D9_E1*/ {0xeb, 1123}, + /*II_D9_E4*/ {0xeb, 1129}, + /*II_D9_E5*/ {0xeb, 1135}, + /*II_D9_E8*/ {0xeb, 1141}, + /*II_D9_E9*/ {0xeb, 1147}, + /*II_D9_EA*/ {0xeb, 1155}, + /*II_D9_EB*/ {0xeb, 1163}, + /*II_D9_EC*/ {0xeb, 1170}, + /*II_D9_ED*/ {0xeb, 1178}, + /*II_D9_EE*/ {0xeb, 1186}, + /*II_D9_F0*/ {0xeb, 1192}, + /*II_D9_F1*/ {0xeb, 1199}, + /*II_D9_F2*/ {0xeb, 1206}, + /*II_D9_F3*/ {0xeb, 1213}, + /*II_D9_F4*/ {0xeb, 1221}, + /*II_D9_F5*/ {0xeb, 1230}, + /*II_D9_F6*/ {0xeb, 1238}, + /*II_D9_F7*/ {0xeb, 1247}, + /*II_D9_F8*/ {0xeb, 1256}, + /*II_D9_F9*/ {0xeb, 1263}, + /*II_D9_FA*/ {0xeb, 1272}, + /*II_D9_FB*/ {0xeb, 1279}, + /*II_D9_FC*/ {0xeb, 1288}, + /*II_D9_FD*/ {0xeb, 1297}, + /*II_D9_FE*/ {0xeb, 1305}, + /*II_D9_FF*/ {0xeb, 1311}, + /*II_DA_00*/ {0xe8, 1317}, + /*II_DA_01*/ {0xe8, 1324}, + /*II_DA_02*/ {0xe8, 1331}, + /*II_DA_03*/ {0xe8, 1338}, + /*II_DA_04*/ {0xe8, 1346}, + /*II_DA_05*/ {0xe8, 1353}, + /*II_DA_06*/ {0xe8, 1361}, 
+ /*II_DA_07*/ {0xe8, 1368}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C0*/ {0xef, 1376}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_C8*/ {0xf0, 1384}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D0*/ {0xf1, 1392}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_D8*/ {0xf2, 1401}, + /*II_DA_E9*/ {0xeb, 1409}, + /*II_DB_00*/ {0xe8, 1418}, + /*II_DB_01*/ {0xf3, 1424}, + /*II_DB_02*/ {0xec, 1432}, + /*II_DB_03*/ {0xec, 1438}, + /*II_DB_05*/ {0xf4, 1074}, + /*II_DB_07*/ {0xf5, 1084}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C0*/ {0xef, 1445}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_C8*/ {0xf0, 1454}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D0*/ {0xf1, 1463}, + /*II_DB_D8*/ {0xf2, 1473}, + /*II_DB_D8*/ {0xf2, 1473}, + /*II_DB_D8*/ {0xf2, 1473}, + /*II_DB_D8*/ {0xf2, 1473}, + /*II_DB_D8*/ {0xf2, 
1473}, + /*II_DB_D8*/ {0xf2, 1473}, + /*II_DB_D8*/ {0xf2, 1473}, + /*II_DB_D8*/ {0xf2, 1473}, + /*II_DB_E0*/ {0xeb, 1482}, + /*II_DB_E1*/ {0xeb, 1488}, + /*II_DB_E4*/ {0xeb, 1496}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_E8*/ {0xf6, 1504}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DB_F0*/ {0xf7, 1512}, + /*II_DC_00*/ {0xf8, 1023}, + /*II_DC_01*/ {0xf8, 1029}, + /*II_DC_02*/ {0xf8, 1035}, + /*II_DC_03*/ {0xf8, 1041}, + /*II_DC_04*/ {0xf8, 1048}, + /*II_DC_05*/ {0xf8, 1054}, + /*II_DC_06*/ {0xf8, 1061}, + /*II_DC_07*/ {0xf8, 1067}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C0*/ {0xf9, 1023}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_C8*/ {0xf9, 1029}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E0*/ {0xf9, 1054}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_E8*/ {0xf9, 1048}, + /*II_DC_F0*/ {0xf9, 1067}, + /*II_DC_F0*/ {0xf9, 1067}, + /*II_DC_F0*/ {0xf9, 1067}, + /*II_DC_F0*/ {0xf9, 1067}, + /*II_DC_F0*/ {0xf9, 1067}, + /*II_DC_F0*/ {0xf9, 1067}, + /*II_DC_F0*/ 
{0xf9, 1067}, + /*II_DC_F0*/ {0xf9, 1067}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DC_F8*/ {0xf9, 1061}, + /*II_DD_00*/ {0xf8, 1074}, + /*II_DD_01*/ {0xfa, 1424}, + /*II_DD_02*/ {0xfb, 1079}, + /*II_DD_03*/ {0xfb, 1084}, + /*II_DD_04*/ {0xed, 1519}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_C0*/ {0xea, 1527}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D0*/ {0xea, 1079}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_D8*/ {0xea, 1084}, + /*II_DD_E0*/ {0xf9, 1534}, + /*II_DD_E1*/ {0xeb, 1534}, + /*II_DD_E0*/ {0xf9, 1534}, + /*II_DD_E0*/ {0xf9, 1534}, + /*II_DD_E0*/ {0xf9, 1534}, + /*II_DD_E0*/ {0xf9, 1534}, + /*II_DD_E0*/ {0xf9, 1534}, + /*II_DD_E0*/ {0xf9, 1534}, + /*II_DD_E8*/ {0xea, 1541}, + /*II_DD_E9*/ {0xeb, 1541}, + /*II_DD_E8*/ {0xea, 1541}, + /*II_DD_E8*/ {0xea, 1541}, + /*II_DD_E8*/ {0xea, 1541}, + /*II_DD_E8*/ {0xea, 1541}, + /*II_DD_E8*/ {0xea, 1541}, + /*II_DD_E8*/ {0xea, 1541}, + /*II_DE_00*/ {0xee, 1317}, + /*II_DE_01*/ {0xee, 1324}, + /*II_DE_02*/ {0xee, 1331}, + /*II_DE_03*/ {0xee, 1338}, + /*II_DE_04*/ {0xee, 1346}, + /*II_DE_05*/ {0xee, 1353}, + /*II_DE_06*/ {0xee, 1361}, + /*II_DE_07*/ {0xee, 1368}, + /*II_DE_C0*/ {0xf9, 1549}, + /*II_DE_C1*/ {0xeb, 1549}, + /*II_DE_C0*/ {0xf9, 1549}, + /*II_DE_C0*/ {0xf9, 1549}, + /*II_DE_C0*/ {0xf9, 1549}, + /*II_DE_C0*/ {0xf9, 1549}, + 
/*II_DE_C0*/ {0xf9, 1549}, + /*II_DE_C0*/ {0xf9, 1549}, + /*II_DE_C8*/ {0xf9, 1556}, + /*II_DE_C9*/ {0xeb, 1556}, + /*II_DE_C8*/ {0xf9, 1556}, + /*II_DE_C8*/ {0xf9, 1556}, + /*II_DE_C8*/ {0xf9, 1556}, + /*II_DE_C8*/ {0xf9, 1556}, + /*II_DE_C8*/ {0xf9, 1556}, + /*II_DE_C8*/ {0xf9, 1556}, + /*II_DE_D9*/ {0xeb, 1563}, + /*II_DE_E0*/ {0xf9, 1571}, + /*II_DE_E1*/ {0xeb, 1571}, + /*II_DE_E0*/ {0xf9, 1571}, + /*II_DE_E0*/ {0xf9, 1571}, + /*II_DE_E0*/ {0xf9, 1571}, + /*II_DE_E0*/ {0xf9, 1571}, + /*II_DE_E0*/ {0xf9, 1571}, + /*II_DE_E0*/ {0xf9, 1571}, + /*II_DE_E8*/ {0xf9, 1579}, + /*II_DE_E9*/ {0xeb, 1579}, + /*II_DE_E8*/ {0xf9, 1579}, + /*II_DE_E8*/ {0xf9, 1579}, + /*II_DE_E8*/ {0xf9, 1579}, + /*II_DE_E8*/ {0xf9, 1579}, + /*II_DE_E8*/ {0xf9, 1579}, + /*II_DE_E8*/ {0xf9, 1579}, + /*II_DE_F0*/ {0xf9, 1586}, + /*II_DE_F1*/ {0xeb, 1586}, + /*II_DE_F0*/ {0xf9, 1586}, + /*II_DE_F0*/ {0xf9, 1586}, + /*II_DE_F0*/ {0xf9, 1586}, + /*II_DE_F0*/ {0xf9, 1586}, + /*II_DE_F0*/ {0xf9, 1586}, + /*II_DE_F0*/ {0xf9, 1586}, + /*II_DE_F8*/ {0xf9, 1594}, + /*II_DE_F9*/ {0xeb, 1594}, + /*II_DE_F8*/ {0xf9, 1594}, + /*II_DE_F8*/ {0xf9, 1594}, + /*II_DE_F8*/ {0xf9, 1594}, + /*II_DE_F8*/ {0xf9, 1594}, + /*II_DE_F8*/ {0xf9, 1594}, + /*II_DE_F8*/ {0xf9, 1594}, + /*II_DF_00*/ {0xee, 1418}, + /*II_DF_01*/ {0xfc, 1424}, + /*II_DF_02*/ {0xfd, 1432}, + /*II_DF_03*/ {0xfd, 1438}, + /*II_DF_04*/ {0xf4, 1601}, + /*II_DF_05*/ {0xf8, 1418}, + /*II_DF_06*/ {0xf5, 1607}, + /*II_DF_07*/ {0xfb, 1438}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_E8*/ {0xf6, 1614}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_DF_F0*/ {0xf6, 1623}, + /*II_F6_00*/ {0xfe, 206}, + /*II_F6_02*/ {0xff, 1631}, + 
/*II_F6_03*/ {0x100, 1636}, + /*II_F6_04*/ {0x101, 1641}, + /*II_F6_05*/ {0x101, 117}, + /*II_F6_06*/ {0x102, 1646}, + /*II_F6_07*/ {0x102, 1651}, + /*II_F7_00*/ {0x103, 206}, + /*II_F7_02*/ {0x104, 1631}, + /*II_F7_03*/ {0x105, 1636}, + /*II_F7_04*/ {0x106, 1641}, + /*II_F7_05*/ {0x106, 117}, + /*II_F7_06*/ {0x107, 1646}, + /*II_F7_07*/ {0x107, 1651}, + /*II_FE_00*/ {0x108, 81}, + /*II_FE_01*/ {0x108, 86}, + /*II_FF_00*/ {0x109, 81}, + /*II_FF_01*/ {0x109, 86}, + /*II_FF_02*/ {0x10a, 456}, + /*II_FF_03*/ {0x10b, 260}, + /*II_FF_04*/ {0x10c, 462}, + /*II_FF_05*/ {0x10d, 467}, + /*II_FF_06*/ {0x10e, 16}, + /*II_0F_00_00*/ {0x10f, 1657}, + /*II_0F_00_01*/ {0x110, 1663}, + /*II_0F_00_02*/ {0x110, 34436}, + /*II_0F_00_03*/ {0x111, 34442}, + /*II_0F_00_04*/ {0x112, 1679}, + /*II_0F_00_05*/ {0x112, 1685}, + /*II_0F_01_00*/ {0x113, 1691}, + /*II_0F_01_01*/ {0x113, 1697}, + /*II_0F_01_02*/ {0x113, 34471}, + /*II_0F_01_03*/ {0x113, 34477}, + /*II_0F_01_04*/ {0x114, 1715}, + /*II_0F_01_06*/ {0x115, 34489}, + /*II_0F_01_07*/ {0x116, 34495}, + /*II_0F_01_C1*/ {0x117, 1735}, + /*II_0F_01_C2*/ {0x117, 1743}, + /*II_0F_01_C3*/ {0x117, 1753}, + /*II_0F_01_C4*/ {0x117, 1763}, + /*II_0F_01_C8*/ {0x118, 1771}, + /*II_0F_01_C9*/ {0x118, 1780}, + /*II_0F_01_D0*/ {0x88, 1787}, + /*II_0F_01_D1*/ {0x88, 1795}, + /*II_0F_01_D4*/ {0x117, 1803}, + /*II_0F_01_D5*/ {0x119, 1811}, + /*II_0F_01_D8*/ {0x11a, 1817}, + /*II_0F_01_D9*/ {0x11b, 1824}, + /*II_0F_01_DA*/ {0x11c, 1833}, + /*II_0F_01_DB*/ {0x11c, 1841}, + /*II_0F_01_DC*/ {0x11b, 1849}, + /*II_0F_01_DD*/ {0x11b, 1855}, + /*II_0F_01_DE*/ {0x11c, 1861}, + /*II_0F_01_DF*/ {0x11d, 1869}, + /*II_0F_01_F8*/ {0x11e, 1878}, + /*II_0F_01_F9*/ {0x11e, 1886}, + /*II_0F_0D_00*/ {0x11f, 1894}, + /*II_0F_0D_01*/ {0x11f, 1904}, + /*II_0F_0F_0C*/ {0x120, 1915}, + /*II_0F_0F_0D*/ {0x121, 1922}, + /*II_0F_0F_1C*/ {0x120, 1929}, + /*II_0F_0F_1D*/ {0x121, 1936}, + /*II_0F_0F_8A*/ {0x120, 1943}, + /*II_0F_0F_8E*/ {0x120, 1951}, + /*II_0F_0F_90*/ {0x121, 
1960}, + /*II_0F_0F_94*/ {0x121, 1969}, + /*II_0F_0F_96*/ {0x121, 1976}, + /*II_0F_0F_97*/ {0x121, 1983}, + /*II_0F_0F_9A*/ {0x121, 1992}, + /*II_0F_0F_9E*/ {0x121, 1999}, + /*II_0F_0F_A0*/ {0x121, 2006}, + /*II_0F_0F_A4*/ {0x121, 2015}, + /*II_0F_0F_A6*/ {0x121, 2022}, + /*II_0F_0F_A7*/ {0x121, 2032}, + /*II_0F_0F_AA*/ {0x121, 2042}, + /*II_0F_0F_AE*/ {0x121, 2050}, + /*II_0F_0F_B0*/ {0x121, 2057}, + /*II_0F_0F_B4*/ {0x121, 2066}, + /*II_0F_0F_B6*/ {0x121, 2073}, + /*II_0F_0F_B7*/ {0x121, 2083}, + /*II_0F_0F_BB*/ {0x120, 2092}, + /*II_0F_0F_BF*/ {0x121, 2100}, + /*II_0F_10*/ {0x122, 2109}, + /*II_66_0F_10*/ {0x123, 2117}, + /*II_F3_0F_10*/ {0x124, 2125}, + /*II_F2_0F_10*/ {0x125, 2132}, + /*II_0F_11*/ {0x12a, 2109}, + /*II_66_0F_11*/ {0x12b, 2117}, + /*II_F3_0F_11*/ {0x12c, 2125}, + /*II_F2_0F_11*/ {0x12d, 2132}, + /*II_66_0F_12*/ {0x132, 2190}, + /*II_F3_0F_12*/ {0x133, 2198}, + /*II_F2_0F_12*/ {0x133, 2208}, + /*II_0F_13*/ {0x137, 2182}, + /*II_66_0F_13*/ {0x138, 2190}, + /*II_0F_14*/ {0x13a, 2266}, + /*II_66_0F_14*/ {0x13b, 2276}, + /*II_0F_15*/ {0x13a, 2308}, + /*II_66_0F_15*/ {0x13b, 2318}, + /*II_66_0F_16*/ {0x132, 2367}, + /*II_F3_0F_16*/ {0x13d, 2375}, + /*II_0F_17*/ {0x137, 2359}, + /*II_66_0F_17*/ {0x138, 2367}, + /*II_0F_18_00*/ {0x13e, 2424}, + /*II_0F_18_01*/ {0x13e, 2437}, + /*II_0F_18_02*/ {0x13e, 2449}, + /*II_0F_18_03*/ {0x13e, 2461}, + /*II_0F_28*/ {0x122, 2473}, + /*II_66_0F_28*/ {0x123, 2481}, + /*II_0F_29*/ {0x12a, 2473}, + /*II_66_0F_29*/ {0x12b, 2481}, + /*II_0F_2A*/ {0x13f, 2507}, + /*II_66_0F_2A*/ {0x140, 2517}, + /*II_F3_0F_2A*/ {0x141, 2527}, + /*II_F2_0F_2A*/ {0x142, 2537}, + /*II_0F_2B*/ {0x143, 2569}, + /*II_66_0F_2B*/ {0x144, 2578}, + /*II_F3_0F_2B*/ {0x145, 2587}, + /*II_F2_0F_2B*/ {0x146, 2596}, + /*II_0F_2C*/ {0x148, 2625}, + /*II_66_0F_2C*/ {0x149, 2636}, + /*II_F3_0F_2C*/ {0x14a, 2647}, + /*II_F2_0F_2C*/ {0x14b, 2658}, + /*II_0F_2D*/ {0x148, 2693}, + /*II_66_0F_2D*/ {0x13b, 2703}, + /*II_F3_0F_2D*/ {0x14a, 2713}, + 
/*II_F2_0F_2D*/ {0x14b, 2723}, + /*II_0F_2E*/ {0x14d, 2755}, + /*II_66_0F_2E*/ {0x14e, 2764}, + /*II_0F_2F*/ {0x14d, 2793}, + /*II_66_0F_2F*/ {0x14e, 2801}, + /*II_0F_50*/ {0x151, 2827}, + /*II_66_0F_50*/ {0x152, 2837}, + /*II_0F_51*/ {0x13a, 2869}, + /*II_66_0F_51*/ {0x13b, 2877}, + /*II_F3_0F_51*/ {0x154, 2885}, + /*II_F2_0F_51*/ {0x14e, 2893}, + /*II_0F_52*/ {0x13a, 2937}, + /*II_F3_0F_52*/ {0x154, 2946}, + /*II_0F_53*/ {0x13a, 2975}, + /*II_F3_0F_53*/ {0x154, 2982}, + /*II_0F_54*/ {0x13a, 3005}, + /*II_66_0F_54*/ {0x13b, 3012}, + /*II_0F_55*/ {0x13a, 3035}, + /*II_66_0F_55*/ {0x13b, 3043}, + /*II_0F_56*/ {0x13a, 3069}, + /*II_66_0F_56*/ {0x13b, 3075}, + /*II_0F_57*/ {0x13a, 3095}, + /*II_66_0F_57*/ {0x13b, 3102}, + /*II_0F_58*/ {0x13a, 3125}, + /*II_66_0F_58*/ {0x13b, 3132}, + /*II_F3_0F_58*/ {0x154, 3139}, + /*II_F2_0F_58*/ {0x14e, 3146}, + /*II_0F_59*/ {0x13a, 3185}, + /*II_66_0F_59*/ {0x13b, 3192}, + /*II_F3_0F_59*/ {0x154, 3199}, + /*II_F2_0F_59*/ {0x14e, 3206}, + /*II_0F_5A*/ {0x14e, 3245}, + /*II_66_0F_5A*/ {0x13b, 3255}, + /*II_F3_0F_5A*/ {0x155, 3265}, + /*II_F2_0F_5A*/ {0x14e, 3275}, + /*II_0F_5B*/ {0x13b, 3329}, + /*II_66_0F_5B*/ {0x13b, 3339}, + /*II_F3_0F_5B*/ {0x13b, 3349}, + /*II_0F_5C*/ {0x13a, 3394}, + /*II_66_0F_5C*/ {0x13b, 3401}, + /*II_F3_0F_5C*/ {0x154, 3408}, + /*II_F2_0F_5C*/ {0x14e, 3415}, + /*II_0F_5D*/ {0x13a, 3454}, + /*II_66_0F_5D*/ {0x13b, 3461}, + /*II_F3_0F_5D*/ {0x154, 3468}, + /*II_F2_0F_5D*/ {0x14e, 3475}, + /*II_0F_5E*/ {0x13a, 3514}, + /*II_66_0F_5E*/ {0x13b, 3521}, + /*II_F3_0F_5E*/ {0x154, 3528}, + /*II_F2_0F_5E*/ {0x14e, 3535}, + /*II_0F_5F*/ {0x13a, 3574}, + /*II_66_0F_5F*/ {0x13b, 3581}, + /*II_F3_0F_5F*/ {0x154, 3588}, + /*II_F2_0F_5F*/ {0x14e, 3595}, + /*II_0F_60*/ {0x158, 3634}, + /*II_66_0F_60*/ {0x13b, 3634}, + /*II_0F_61*/ {0x158, 3657}, + /*II_66_0F_61*/ {0x13b, 3657}, + /*II_0F_62*/ {0x158, 3680}, + /*II_66_0F_62*/ {0x13b, 3680}, + /*II_0F_63*/ {0x159, 3703}, + /*II_66_0F_63*/ {0x13b, 3703}, + /*II_0F_64*/ 
{0x159, 3724}, + /*II_66_0F_64*/ {0x13b, 3724}, + /*II_0F_65*/ {0x159, 3743}, + /*II_66_0F_65*/ {0x13b, 3743}, + /*II_0F_66*/ {0x159, 3762}, + /*II_66_0F_66*/ {0x13b, 3762}, + /*II_0F_67*/ {0x159, 3781}, + /*II_66_0F_67*/ {0x13b, 3781}, + /*II_0F_68*/ {0x159, 3802}, + /*II_66_0F_68*/ {0x13b, 3802}, + /*II_0F_69*/ {0x159, 3825}, + /*II_66_0F_69*/ {0x13b, 3825}, + /*II_0F_6A*/ {0x159, 3848}, + /*II_66_0F_6A*/ {0x13b, 3848}, + /*II_0F_6B*/ {0x159, 3871}, + /*II_66_0F_6B*/ {0x13b, 3871}, + /*II_66_0F_6C*/ {0x13b, 3892}, + /*II_66_0F_6D*/ {0x13b, 3917}, + /*II_0F_6F*/ {0x15d, 3948}, + /*II_66_0F_6F*/ {0x123, 3968}, + /*II_F3_0F_6F*/ {0x123, 3976}, + /*II_0F_74*/ {0x159, 4065}, + /*II_66_0F_74*/ {0x13b, 4065}, + /*II_0F_75*/ {0x159, 4084}, + /*II_66_0F_75*/ {0x13b, 4084}, + /*II_0F_76*/ {0x159, 4103}, + /*II_66_0F_76*/ {0x13b, 4103}, + /*II_0F_77*/ {0x161, 4122}, + /*II_0F_78*/ {0x163, 4150}, + /*II_0F_79*/ {0x166, 4174}, + /*II_66_0F_79*/ {0x167, 4158}, + /*II_F2_0F_79*/ {0x168, 4165}, + /*II_0F_7A_30*/ {0x169, 4183}, + /*II_0F_7A_31*/ {0x16a, 4193}, + /*II_66_0F_7C*/ {0x16b, 4203}, + /*II_F2_0F_7C*/ {0x16b, 4211}, + /*II_66_0F_7D*/ {0x16b, 4237}, + /*II_F2_0F_7D*/ {0x16b, 4245}, + /*II_F3_0F_7E*/ {0x125, 3948}, + /*II_0F_7F*/ {0x16f, 3948}, + /*II_66_0F_7F*/ {0x12b, 3968}, + /*II_F3_0F_7F*/ {0x12b, 3976}, + /*II_F3_0F_B8*/ {0x173, 4360}, + /*II_0F_BA_04*/ {0x174, 872}, + /*II_0F_BA_05*/ {0x175, 887}, + /*II_0F_BA_06*/ {0x175, 912}, + /*II_0F_BA_07*/ {0x175, 934}, + /*II_0F_BC*/ {0x176, 4368}, + /*II_F3_0F_BC*/ {0x177, 4373}, + /*II_0F_BD*/ {0x176, 4380}, + /*II_F3_0F_BD*/ {0x178, 4385}, + /*II_0F_C7_07*/ {0x188, 6407}, + /*II_66_0F_D0*/ {0x16b, 6416}, + /*II_F2_0F_D0*/ {0x16b, 6426}, + /*II_0F_D1*/ {0x159, 6458}, + /*II_66_0F_D1*/ {0x13b, 6458}, + /*II_0F_D2*/ {0x159, 6473}, + /*II_66_0F_D2*/ {0x13b, 6473}, + /*II_0F_D3*/ {0x159, 6488}, + /*II_66_0F_D3*/ {0x13b, 6488}, + /*II_0F_D4*/ {0x14e, 6503}, + /*II_66_0F_D4*/ {0x13b, 6503}, + /*II_0F_D5*/ {0x159, 6518}, + 
/*II_66_0F_D5*/ {0x13b, 6518}, + /*II_66_0F_D6*/ {0x12d, 3948}, + /*II_F3_0F_D6*/ {0x189, 6535}, + /*II_F2_0F_D6*/ {0x18a, 6544}, + /*II_0F_D7*/ {0x18c, 6553}, + /*II_66_0F_D7*/ {0x18d, 6553}, + /*II_0F_D8*/ {0x159, 6574}, + /*II_66_0F_D8*/ {0x13b, 6574}, + /*II_0F_D9*/ {0x159, 6593}, + /*II_66_0F_D9*/ {0x13b, 6593}, + /*II_0F_DA*/ {0x18f, 6612}, + /*II_66_0F_DA*/ {0x13b, 6612}, + /*II_0F_DB*/ {0x159, 6629}, + /*II_66_0F_DB*/ {0x13b, 6629}, + /*II_0F_DC*/ {0x159, 6642}, + /*II_66_0F_DC*/ {0x13b, 6642}, + /*II_0F_DD*/ {0x159, 6661}, + /*II_66_0F_DD*/ {0x13b, 6661}, + /*II_0F_DE*/ {0x18f, 6670}, + /*II_66_0F_DE*/ {0x13b, 6670}, + /*II_0F_DF*/ {0x159, 6687}, + /*II_66_0F_DF*/ {0x13b, 6687}, + /*II_0F_E0*/ {0x18f, 6702}, + /*II_66_0F_E0*/ {0x13b, 6702}, + /*II_0F_E1*/ {0x159, 6717}, + /*II_66_0F_E1*/ {0x13b, 6717}, + /*II_0F_E2*/ {0x159, 6732}, + /*II_66_0F_E2*/ {0x13b, 6732}, + /*II_0F_E3*/ {0x18f, 6747}, + /*II_66_0F_E3*/ {0x13b, 6747}, + /*II_0F_E4*/ {0x18f, 6762}, + /*II_66_0F_E4*/ {0x13b, 6762}, + /*II_0F_E5*/ {0x159, 6781}, + /*II_66_0F_E5*/ {0x13b, 6781}, + /*II_66_0F_E6*/ {0x13b, 6798}, + /*II_F3_0F_E6*/ {0x14e, 6809}, + /*II_F2_0F_E6*/ {0x13b, 6819}, + /*II_0F_E7*/ {0x190, 6863}, + /*II_66_0F_E7*/ {0x144, 6871}, + /*II_0F_E8*/ {0x159, 6890}, + /*II_66_0F_E8*/ {0x13b, 6890}, + /*II_0F_E9*/ {0x159, 6907}, + /*II_66_0F_E9*/ {0x13b, 6907}, + /*II_0F_EA*/ {0x18f, 6924}, + /*II_66_0F_EA*/ {0x13b, 6924}, + /*II_0F_EB*/ {0x159, 6941}, + /*II_66_0F_EB*/ {0x13b, 6941}, + /*II_0F_EC*/ {0x159, 6952}, + /*II_66_0F_EC*/ {0x13b, 6952}, + /*II_0F_ED*/ {0x159, 6969}, + /*II_66_0F_ED*/ {0x13b, 6969}, + /*II_0F_EE*/ {0x18f, 6986}, + /*II_66_0F_EE*/ {0x13b, 6986}, + /*II_0F_EF*/ {0x159, 7003}, + /*II_66_0F_EF*/ {0x13b, 7003}, + /*II_F2_0F_F0*/ {0x191, 7016}, + /*II_0F_F1*/ {0x159, 7031}, + /*II_66_0F_F1*/ {0x13b, 7031}, + /*II_0F_F2*/ {0x159, 7046}, + /*II_66_0F_F2*/ {0x13b, 7046}, + /*II_0F_F3*/ {0x159, 7061}, + /*II_66_0F_F3*/ {0x13b, 7061}, + /*II_0F_F4*/ {0x193, 7076}, + 
/*II_66_0F_F4*/ {0x13b, 7076}, + /*II_0F_F5*/ {0x159, 7095}, + /*II_66_0F_F5*/ {0x13b, 7095}, + /*II_0F_F6*/ {0x18f, 7114}, + /*II_66_0F_F6*/ {0x13b, 7114}, + /*II_0F_F7*/ {0x194, 7131}, + /*II_66_0F_F7*/ {0x195, 7141}, + /*II_0F_F8*/ {0x159, 7166}, + /*II_66_0F_F8*/ {0x13b, 7166}, + /*II_0F_F9*/ {0x159, 7181}, + /*II_66_0F_F9*/ {0x13b, 7181}, + /*II_0F_FA*/ {0x159, 7196}, + /*II_66_0F_FA*/ {0x13b, 7196}, + /*II_0F_FB*/ {0x193, 7211}, + /*II_66_0F_FB*/ {0x13b, 7211}, + /*II_0F_FC*/ {0x159, 7226}, + /*II_66_0F_FC*/ {0x13b, 7226}, + /*II_0F_FD*/ {0x159, 7241}, + /*II_66_0F_FD*/ {0x13b, 7241}, + /*II_0F_FE*/ {0x159, 7256}, + /*II_66_0F_FE*/ {0x13b, 7256}, + /*II_D9_06*/ {0x197, 7271}, + /*II_9B_D9_06*/ {0x198, 7280}, + /*II_D9_07*/ {0xfd, 7288}, + /*II_9B_D9_07*/ {0x199, 7296}, + /*II_DB_E2*/ {0xeb, 7303}, + /*II_9B_DB_E2*/ {0x19a, 7311}, + /*II_DB_E3*/ {0xeb, 7318}, + /*II_9B_DB_E3*/ {0x19a, 7326}, + /*II_DD_06*/ {0x197, 7333}, + /*II_9B_DD_06*/ {0x198, 7341}, + /*II_DD_07*/ {0xfd, 7348}, + /*II_9B_DD_07*/ {0x199, 7356}, + /*II_DF_E0*/ {0x19b, 7348}, + /*II_9B_DF_E0*/ {0x19c, 7356}, + /*II_0F_38_00*/ {0x19d, 7363}, + /*II_66_0F_38_00*/ {0x19e, 7363}, + /*II_0F_38_01*/ {0x19d, 7380}, + /*II_66_0F_38_01*/ {0x19e, 7380}, + /*II_0F_38_02*/ {0x19d, 7397}, + /*II_66_0F_38_02*/ {0x19e, 7397}, + /*II_0F_38_03*/ {0x19d, 7414}, + /*II_66_0F_38_03*/ {0x19e, 7414}, + /*II_0F_38_04*/ {0x19d, 7433}, + /*II_66_0F_38_04*/ {0x19e, 7433}, + /*II_0F_38_05*/ {0x19d, 7456}, + /*II_66_0F_38_05*/ {0x19e, 7456}, + /*II_0F_38_06*/ {0x19d, 7473}, + /*II_66_0F_38_06*/ {0x19e, 7473}, + /*II_0F_38_07*/ {0x19d, 7490}, + /*II_66_0F_38_07*/ {0x19e, 7490}, + /*II_0F_38_08*/ {0x19d, 7509}, + /*II_66_0F_38_08*/ {0x19e, 7509}, + /*II_0F_38_09*/ {0x19d, 7526}, + /*II_66_0F_38_09*/ {0x19e, 7526}, + /*II_0F_38_0A*/ {0x19d, 7543}, + /*II_66_0F_38_0A*/ {0x19e, 7543}, + /*II_0F_38_0B*/ {0x19d, 7560}, + /*II_66_0F_38_0B*/ {0x19e, 7560}, + /*II_66_0F_38_17*/ {0x1a0, 7651}, + /*II_0F_38_1C*/ {0x19d, 7710}, + 
/*II_66_0F_38_1C*/ {0x19e, 7710}, + /*II_0F_38_1D*/ {0x19d, 7725}, + /*II_66_0F_38_1D*/ {0x19e, 7725}, + /*II_0F_38_1E*/ {0x19d, 7740}, + /*II_66_0F_38_1E*/ {0x19e, 7740}, + /*II_66_0F_38_20*/ {0x1a5, 7755}, + /*II_66_0F_38_21*/ {0x1a6, 7776}, + /*II_66_0F_38_22*/ {0x1a7, 7797}, + /*II_66_0F_38_23*/ {0x1a5, 7818}, + /*II_66_0F_38_24*/ {0x1a6, 7839}, + /*II_66_0F_38_25*/ {0x1a5, 7860}, + /*II_66_0F_38_28*/ {0x1a9, 7881}, + /*II_66_0F_38_29*/ {0x1a9, 7898}, + /*II_66_0F_38_2A*/ {0x1aa, 7917}, + /*II_66_0F_38_2B*/ {0x1a9, 7938}, + /*II_66_0F_38_30*/ {0x1a5, 7983}, + /*II_66_0F_38_31*/ {0x1a6, 8004}, + /*II_66_0F_38_32*/ {0x1a7, 8025}, + /*II_66_0F_38_33*/ {0x1a5, 8046}, + /*II_66_0F_38_34*/ {0x1a6, 8067}, + /*II_66_0F_38_35*/ {0x1a5, 8088}, + /*II_66_0F_38_37*/ {0x1a0, 8109}, + /*II_66_0F_38_38*/ {0x1a9, 8128}, + /*II_66_0F_38_39*/ {0x1a9, 8145}, + /*II_66_0F_38_3A*/ {0x1a9, 8162}, + /*II_66_0F_38_3B*/ {0x1a9, 8179}, + /*II_66_0F_38_3C*/ {0x1a9, 8196}, + /*II_66_0F_38_3D*/ {0x1a9, 8213}, + /*II_66_0F_38_3E*/ {0x1a9, 8230}, + /*II_66_0F_38_3F*/ {0x1a9, 8247}, + /*II_66_0F_38_40*/ {0x1a9, 8264}, + /*II_66_0F_38_41*/ {0x1a9, 8281}, + /*II_66_0F_38_80*/ {0x1ad, 8306}, + /*II_66_0F_38_81*/ {0x1ad, 8314}, + /*II_66_0F_38_82*/ {0x1ad, 8323}, + /*II_66_0F_38_DB*/ {0x1b0, 9172}, + /*II_66_0F_38_DC*/ {0x1b0, 9189}, + /*II_66_0F_38_DD*/ {0x1b0, 9206}, + /*II_66_0F_38_DE*/ {0x1b0, 9231}, + /*II_66_0F_38_DF*/ {0x1b0, 9248}, + /*II_0F_38_F0*/ {0x1b3, 9273}, + /*II_F2_0F_38_F0*/ {0x1b4, 9280}, + /*II_0F_38_F1*/ {0x1b5, 9273}, + /*II_F2_0F_38_F1*/ {0x1b6, 9280}, + /*II_0F_71_02*/ {0x1cd, 6458}, + /*II_66_0F_71_02*/ {0x1ce, 6458}, + /*II_0F_71_04*/ {0x1cd, 6717}, + /*II_66_0F_71_04*/ {0x1ce, 6717}, + /*II_0F_71_06*/ {0x1cd, 7031}, + /*II_66_0F_71_06*/ {0x1ce, 7031}, + /*II_0F_72_02*/ {0x1cd, 6473}, + /*II_66_0F_72_02*/ {0x1ce, 6473}, + /*II_0F_72_04*/ {0x1cd, 6732}, + /*II_66_0F_72_04*/ {0x1ce, 6732}, + /*II_0F_72_06*/ {0x1cd, 7046}, + /*II_66_0F_72_06*/ {0x1ce, 7046}, + 
/*II_0F_73_02*/ {0x1cd, 6488}, + /*II_66_0F_73_02*/ {0x1ce, 6488}, + /*II_66_0F_73_03*/ {0x1ce, 9852}, + /*II_0F_73_06*/ {0x1cd, 7061}, + /*II_66_0F_73_06*/ {0x1ce, 7061}, + /*II_66_0F_73_07*/ {0x1ce, 9869}, + /*II_F3_0F_AE_00*/ {0x1d0, 9904}, + /*II_F3_0F_AE_01*/ {0x1d0, 9934}, + /*II_0F_AE_02*/ {0x1d1, 9944}, + /*II_F3_0F_AE_02*/ {0x1d0, 9953}, + /*II_0F_AE_03*/ {0x1d1, 9973}, + /*II_F3_0F_AE_03*/ {0x1d0, 9982}, + /*II_0F_C7_06*/ {0x1d3, 10002}, + /*II_66_0F_C7_06*/ {0x188, 10011}, + /*II_F3_0F_C7_06*/ {0x188, 10020} +}; + +_InstInfoEx InstInfosEx[381] = { + /*II_69*/ {{0x34, 117}, 0x0, 3, 0, 0, 0}, + /*II_6B*/ {{0x34, 117}, 0x0, 5, 0, 0, 0}, + /*II_98*/ {{0x4e, 228}, 0x0, 0, 0, 233, 239}, + /*II_99*/ {{0x4e, 245}, 0x0, 0, 0, 250, 255}, + /*II_E3*/ {{0x76, 427}, 0x0, 0, 0, 433, 440}, + /*II_0F_A4*/ {{0xac, 876}, 0x0, 1, 0, 0, 0}, + /*II_0F_A5*/ {{0xac, 876}, 0x0, 52, 0, 0, 0}, + /*II_0F_AC*/ {{0xac, 892}, 0x0, 1, 0, 0, 0}, + /*II_0F_AD*/ {{0xac, 892}, 0x0, 52, 0, 0, 0}, + /*II_V_0F_10*/ {{0x126, 2139}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_10*/ {{0x126, 2148}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_10*/ {{0x127, 2157}, 0x20, 69, 0, 0, 0}, + /*II_V_F2_0F_10*/ {{0x127, 2165}, 0x20, 69, 0, 0, 0}, + /*II_VRR_F3_0F_10*/ {{0x128, 2157}, 0x60, 0, 0, 0, 0}, + /*II_VRR_F2_0F_10*/ {{0x129, 2165}, 0x60, 0, 0, 0, 0}, + /*II_V_0F_11*/ {{0x12e, 2139}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_11*/ {{0x12e, 2148}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_11*/ {{0x127, 2157}, 0x20, 69, 0, 0, 0}, + /*II_V_F2_0F_11*/ {{0x127, 2165}, 0x20, 69, 0, 0, 0}, + /*II_VRR_F3_0F_11*/ {{0x12f, 2157}, 0x60, 0, 0, 0, 0}, + /*II_VRR_F2_0F_11*/ {{0x130, 2165}, 0x60, 0, 0, 0, 0}, + /*II_0F_12*/ {{0x131, 2173}, 0x0, 0, 0, 2182, 0}, + /*II_V_0F_12*/ {{0x134, 2217}, 0x0, 72, 0, 2227, 0}, + /*II_V_66_0F_12*/ {{0x135, 2236}, 0x0, 46, 0, 0, 0}, + /*II_V_F3_0F_12*/ {{0x126, 2245}, 0x41, 0, 0, 0, 0}, + /*II_V_F2_0F_12*/ {{0x136, 2256}, 0x41, 0, 0, 0, 0}, + /*II_V_0F_13*/ {{0x139, 2227}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_13*/ 
{{0x139, 2236}, 0x40, 0, 0, 0, 0}, + /*II_V_0F_14*/ {{0x13c, 2286}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_14*/ {{0x13c, 2297}, 0x1, 90, 0, 0, 0}, + /*II_V_0F_15*/ {{0x13c, 2328}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_15*/ {{0x13c, 2339}, 0x1, 90, 0, 0, 0}, + /*II_0F_16*/ {{0x131, 2350}, 0x0, 0, 0, 2359, 0}, + /*II_V_0F_16*/ {{0x134, 2385}, 0x0, 72, 0, 2395, 0}, + /*II_V_66_0F_16*/ {{0x135, 2404}, 0x0, 46, 0, 0, 0}, + /*II_V_F3_0F_16*/ {{0x126, 2413}, 0x41, 0, 0, 0, 0}, + /*II_V_0F_17*/ {{0x139, 2395}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_17*/ {{0x139, 2404}, 0x40, 0, 0, 0, 0}, + /*II_V_0F_28*/ {{0x126, 2489}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_28*/ {{0x126, 2498}, 0x41, 0, 0, 0, 0}, + /*II_V_0F_29*/ {{0x12e, 2489}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_29*/ {{0x12e, 2498}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_2A*/ {{0x135, 2547}, 0x2, 79, 0, 0, 0}, + /*II_V_F2_0F_2A*/ {{0x135, 2558}, 0x2, 79, 0, 0, 0}, + /*II_V_0F_2B*/ {{0x147, 2605}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_2B*/ {{0x147, 2615}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_2C*/ {{0x14c, 2669}, 0x42, 0, 0, 0, 0}, + /*II_V_F2_0F_2C*/ {{0x14c, 2681}, 0x42, 0, 0, 0, 0}, + /*II_V_F3_0F_2D*/ {{0x14c, 2733}, 0x42, 0, 0, 0, 0}, + /*II_V_F2_0F_2D*/ {{0x14c, 2744}, 0x42, 0, 0, 0, 0}, + /*II_V_0F_2E*/ {{0x14f, 2773}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_2E*/ {{0x150, 2783}, 0x40, 0, 0, 0, 0}, + /*II_V_0F_2F*/ {{0x14f, 2809}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_2F*/ {{0x150, 2818}, 0x40, 0, 0, 0, 0}, + /*II_V_0F_50*/ {{0x153, 2847}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_50*/ {{0x153, 2858}, 0x41, 0, 0, 0, 0}, + /*II_V_0F_51*/ {{0x126, 2901}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_51*/ {{0x126, 2910}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_51*/ {{0x135, 2919}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_51*/ {{0x135, 2928}, 0x0, 72, 0, 0, 0}, + /*II_V_0F_52*/ {{0x126, 2955}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_52*/ {{0x135, 2965}, 0x0, 71, 0, 0, 0}, + /*II_V_0F_53*/ {{0x126, 2989}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_53*/ {{0x135, 2997}, 0x0, 71, 0, 0, 0}, + /*II_V_0F_54*/ 
{{0x13c, 3019}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_54*/ {{0x13c, 3027}, 0x1, 90, 0, 0, 0}, + /*II_V_0F_55*/ {{0x13c, 3051}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_55*/ {{0x13c, 3060}, 0x1, 90, 0, 0, 0}, + /*II_V_0F_56*/ {{0x13c, 3081}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_56*/ {{0x13c, 3088}, 0x1, 90, 0, 0, 0}, + /*II_V_0F_57*/ {{0x13c, 3109}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_57*/ {{0x13c, 3117}, 0x1, 90, 0, 0, 0}, + /*II_V_0F_58*/ {{0x13c, 3153}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_58*/ {{0x13c, 3161}, 0x1, 90, 0, 0, 0}, + /*II_V_F3_0F_58*/ {{0x135, 3169}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_58*/ {{0x135, 3177}, 0x0, 72, 0, 0, 0}, + /*II_V_0F_59*/ {{0x13c, 3213}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_59*/ {{0x13c, 3221}, 0x1, 90, 0, 0, 0}, + /*II_V_F3_0F_59*/ {{0x135, 3229}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_59*/ {{0x135, 3237}, 0x0, 72, 0, 0, 0}, + /*II_V_0F_5A*/ {{0x156, 3285}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_5A*/ {{0x157, 3296}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_5A*/ {{0x135, 3307}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_5A*/ {{0x135, 3318}, 0x0, 72, 0, 0, 0}, + /*II_V_0F_5B*/ {{0x126, 3360}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_5B*/ {{0x126, 3371}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_5B*/ {{0x126, 3382}, 0x41, 0, 0, 0, 0}, + /*II_V_0F_5C*/ {{0x13c, 3422}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_5C*/ {{0x13c, 3430}, 0x1, 90, 0, 0, 0}, + /*II_V_F3_0F_5C*/ {{0x135, 3438}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_5C*/ {{0x135, 3446}, 0x0, 72, 0, 0, 0}, + /*II_V_0F_5D*/ {{0x13c, 3482}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_5D*/ {{0x13c, 3490}, 0x1, 90, 0, 0, 0}, + /*II_V_F3_0F_5D*/ {{0x135, 3498}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_5D*/ {{0x135, 3506}, 0x0, 72, 0, 0, 0}, + /*II_V_0F_5E*/ {{0x13c, 3542}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_5E*/ {{0x13c, 3550}, 0x1, 90, 0, 0, 0}, + /*II_V_F3_0F_5E*/ {{0x135, 3558}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_5E*/ {{0x135, 3566}, 0x0, 72, 0, 0, 0}, + /*II_V_0F_5F*/ {{0x13c, 3602}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_5F*/ {{0x13c, 3610}, 0x1, 90, 0, 0, 0}, + 
/*II_V_F3_0F_5F*/ {{0x135, 3618}, 0x0, 71, 0, 0, 0}, + /*II_V_F2_0F_5F*/ {{0x135, 3626}, 0x0, 72, 0, 0, 0}, + /*II_V_66_0F_60*/ {{0x135, 3645}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_61*/ {{0x135, 3668}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_62*/ {{0x135, 3691}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_63*/ {{0x135, 3713}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_64*/ {{0x135, 3733}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_65*/ {{0x135, 3752}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_66*/ {{0x135, 3771}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_67*/ {{0x135, 3791}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_68*/ {{0x135, 3813}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_69*/ {{0x135, 3836}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_6A*/ {{0x135, 3859}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_6B*/ {{0x135, 3881}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_6C*/ {{0x135, 3904}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_6D*/ {{0x135, 3929}, 0x0, 73, 0, 0, 0}, + /*II_0F_6E*/ {{0x15a, 3942}, 0x0, 0, 0, 0, 3948}, + /*II_66_0F_6E*/ {{0x15b, 3942}, 0x0, 0, 0, 0, 3948}, + /*II_V_66_0F_6E*/ {{0x15c, 3954}, 0x46, 0, 0, 3961, 0}, + /*II_V_66_0F_6F*/ {{0x126, 3984}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_6F*/ {{0x126, 3993}, 0x41, 0, 0, 0, 0}, + /*II_0F_70*/ {{0x15e, 4002}, 0x0, 1, 0, 0, 0}, + /*II_66_0F_70*/ {{0x15f, 4010}, 0x0, 1, 0, 0, 0}, + /*II_F3_0F_70*/ {{0x15f, 4018}, 0x0, 1, 0, 0, 0}, + /*II_F2_0F_70*/ {{0x15f, 4027}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_70*/ {{0x160, 4036}, 0x40, 1, 0, 0, 0}, + /*II_V_F3_0F_70*/ {{0x160, 4045}, 0x40, 1, 0, 0, 0}, + /*II_V_F2_0F_70*/ {{0x160, 4055}, 0x40, 1, 0, 0, 0}, + /*II_V_66_0F_74*/ {{0x135, 4074}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_75*/ {{0x135, 4093}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_76*/ {{0x135, 4112}, 0x0, 73, 0, 0, 0}, + /*II_V_0F_77*/ {{0x162, 4128}, 0x49, 0, 0, 4140, 0}, + /*II_66_0F_78*/ {{0x164, 4158}, 0x0, 8, 0, 0, 0}, + /*II_F2_0F_78*/ {{0x165, 4165}, 0x0, 7, 8, 0, 0}, + /*II_V_66_0F_7C*/ {{0x13c, 4219}, 0x1, 90, 0, 0, 0}, + /*II_V_F2_0F_7C*/ {{0x13c, 4228}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_7D*/ {{0x13c, 4253}, 0x1, 
90, 0, 0, 0}, + /*II_V_F2_0F_7D*/ {{0x13c, 4262}, 0x1, 90, 0, 0, 0}, + /*II_0F_7E*/ {{0x16c, 3942}, 0x0, 0, 0, 0, 3948}, + /*II_66_0F_7E*/ {{0x16d, 3942}, 0x0, 0, 0, 0, 3948}, + /*II_V_66_0F_7E*/ {{0x16e, 3954}, 0x46, 0, 0, 3961, 0}, + /*II_V_F3_0F_7E*/ {{0x150, 3961}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_7F*/ {{0x12e, 3984}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_7F*/ {{0x12e, 3993}, 0x41, 0, 0, 0, 0}, + /*II_0F_AE_04*/ {{0x170, 4271}, 0x0, 0, 0, 0, 4278}, + /*II_0F_AE_05*/ {{0x171, 4287}, 0x0, 0, 0, 4295, 4303}, + /*II_0F_AE_06*/ {{0x171, 4313}, 0x0, 0, 0, 4321, 4331}, + /*II_0F_AE_07*/ {{0x172, 4343}, 0x0, 0, 0, 4351, 0}, + /*II_0F_C2*/ {{0x179, 4392}, 0x0, 0, 0, 4401, 4410}, + /*II_66_0F_C2*/ {{0x17a, 4471}, 0x0, 0, 0, 4480, 4489}, + /*II_F3_0F_C2*/ {{0x17b, 4550}, 0x0, 0, 0, 4559, 4568}, + /*II_F2_0F_C2*/ {{0x17c, 4629}, 0x0, 0, 0, 4638, 4647}, + /*II_V_0F_C2*/ {{0x17d, 4708}, 0x1, 90, 0, 4718, 4728}, + /*II_V_66_0F_C2*/ {{0x17d, 5110}, 0x1, 90, 0, 5120, 5130}, + /*II_V_F3_0F_C2*/ {{0x17e, 5512}, 0x0, 71, 0, 5522, 5532}, + /*II_V_F2_0F_C2*/ {{0x17e, 5914}, 0x0, 72, 0, 5924, 5934}, + /*II_0F_C4*/ {{0x17f, 6316}, 0x0, 1, 0, 0, 0}, + /*II_66_0F_C4*/ {{0x180, 6316}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_C4*/ {{0x181, 6324}, 0x0, 25, 1, 0, 0}, + /*II_0F_C5*/ {{0x182, 6333}, 0x0, 1, 0, 0, 0}, + /*II_66_0F_C5*/ {{0x183, 6333}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_C5*/ {{0x184, 6341}, 0x40, 1, 0, 0, 0}, + /*II_0F_C6*/ {{0x185, 6350}, 0x0, 1, 0, 0, 0}, + /*II_66_0F_C6*/ {{0x15f, 6358}, 0x0, 1, 0, 0, 0}, + /*II_V_0F_C6*/ {{0x186, 6366}, 0x1, 90, 1, 0, 0}, + /*II_V_66_0F_C6*/ {{0x186, 6375}, 0x1, 90, 1, 0, 0}, + /*II_0F_C7_01*/ {{0x187, 6384}, 0x0, 0, 0, 0, 6395}, + /*II_V_66_0F_D0*/ {{0x13c, 6436}, 0x1, 90, 0, 0, 0}, + /*II_V_F2_0F_D0*/ {{0x13c, 6447}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_D1*/ {{0x135, 6465}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_D2*/ {{0x135, 6480}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_D3*/ {{0x135, 6495}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_D4*/ {{0x135, 6510}, 0x0, 73, 0, 0, 0}, 
+ /*II_V_66_0F_D5*/ {{0x135, 6526}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_D6*/ {{0x18b, 3961}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_D7*/ {{0x18e, 6563}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_D8*/ {{0x135, 6583}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_D9*/ {{0x135, 6602}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_DA*/ {{0x135, 6620}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_DB*/ {{0x135, 6635}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_DC*/ {{0x135, 6651}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_DD*/ {{0x135, 6651}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_DE*/ {{0x135, 6678}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_DF*/ {{0x135, 6694}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E0*/ {{0x135, 6709}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E1*/ {{0x135, 6724}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E2*/ {{0x135, 6739}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E3*/ {{0x135, 6754}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E4*/ {{0x135, 6771}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E5*/ {{0x135, 6789}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E6*/ {{0x157, 6829}, 0x41, 0, 0, 0, 0}, + /*II_V_F3_0F_E6*/ {{0x156, 6841}, 0x41, 0, 0, 0, 0}, + /*II_V_F2_0F_E6*/ {{0x157, 6852}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_E7*/ {{0x147, 6880}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_E8*/ {{0x135, 6898}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_E9*/ {{0x135, 6915}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_EA*/ {{0x135, 6932}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_EB*/ {{0x135, 6946}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_EC*/ {{0x135, 6960}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_ED*/ {{0x135, 6977}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_EE*/ {{0x135, 6994}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_EF*/ {{0x135, 7009}, 0x0, 73, 0, 0, 0}, + /*II_V_F2_0F_F0*/ {{0x192, 7023}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_F1*/ {{0x135, 7038}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_F2*/ {{0x135, 7053}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_F3*/ {{0x135, 7068}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_F4*/ {{0x135, 7085}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_F5*/ {{0x135, 7104}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_F6*/ {{0x135, 7122}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_F7*/ 
{{0x196, 7153}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_F8*/ {{0x135, 7173}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_F9*/ {{0x135, 7188}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_FA*/ {{0x135, 7203}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_FB*/ {{0x135, 7218}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_FC*/ {{0x135, 7233}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_FD*/ {{0x135, 7248}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_FE*/ {{0x135, 7263}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_00*/ {{0x135, 7371}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_01*/ {{0x135, 7388}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_02*/ {{0x135, 7405}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_03*/ {{0x135, 7423}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_04*/ {{0x135, 7444}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_05*/ {{0x135, 7464}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_06*/ {{0x135, 7481}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_07*/ {{0x135, 7499}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_08*/ {{0x135, 7517}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_09*/ {{0x135, 7534}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_0A*/ {{0x135, 7551}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_0B*/ {{0x135, 7570}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_0C*/ {{0x13c, 7581}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_38_0D*/ {{0x13c, 7592}, 0x1, 90, 0, 0, 0}, + /*II_V_66_0F_38_0E*/ {{0x126, 7603}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_38_0F*/ {{0x126, 7612}, 0x41, 0, 0, 0, 0}, + /*II_66_0F_38_10*/ {{0x19f, 7621}, 0x0, 74, 0, 0, 0}, + /*II_66_0F_38_14*/ {{0x19f, 7631}, 0x0, 74, 0, 0, 0}, + /*II_66_0F_38_15*/ {{0x19f, 7641}, 0x0, 74, 0, 0, 0}, + /*II_V_66_0F_38_17*/ {{0x126, 7658}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_38_18*/ {{0x1a1, 7666}, 0x41, 0, 0, 0, 0}, + /*II_V_66_0F_38_19*/ {{0x1a2, 7680}, 0x50, 0, 0, 0, 0}, + /*II_V_66_0F_38_1A*/ {{0x1a3, 7694}, 0x50, 0, 0, 0, 0}, + /*II_V_66_0F_38_1C*/ {{0x1a4, 7717}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_1D*/ {{0x1a4, 7732}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_1E*/ {{0x1a4, 7747}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_20*/ {{0x150, 7765}, 0x40, 0, 0, 0, 0}, + 
/*II_V_66_0F_38_21*/ {{0x14f, 7786}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_22*/ {{0x1a8, 7807}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_23*/ {{0x150, 7828}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_24*/ {{0x14f, 7849}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_25*/ {{0x150, 7870}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_28*/ {{0x135, 7889}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_29*/ {{0x135, 7907}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_2A*/ {{0x1ab, 7927}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_2B*/ {{0x135, 7948}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_2C*/ {{0x13c, 7959}, 0x1, 92, 0, 0, 0}, + /*II_V_66_0F_38_2D*/ {{0x13c, 7971}, 0x1, 92, 0, 0, 0}, + /*II_V_66_0F_38_2E*/ {{0x1ac, 7959}, 0x1, 83, 0, 0, 0}, + /*II_V_66_0F_38_2F*/ {{0x1ac, 7971}, 0x1, 83, 0, 0, 0}, + /*II_V_66_0F_38_30*/ {{0x150, 7993}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_31*/ {{0x14f, 8014}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_32*/ {{0x1a8, 8035}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_33*/ {{0x150, 8056}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_34*/ {{0x14f, 8077}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_35*/ {{0x150, 8098}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_37*/ {{0x135, 8118}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_38*/ {{0x135, 8136}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_39*/ {{0x135, 8153}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_3A*/ {{0x135, 8170}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_3B*/ {{0x135, 8187}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_3C*/ {{0x135, 8204}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_3D*/ {{0x135, 8221}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_3E*/ {{0x135, 8238}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_3F*/ {{0x135, 8255}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_40*/ {{0x135, 8272}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_41*/ {{0x1a4, 8293}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_96*/ {{0x1ae, 8332}, 0x7, 90, 0, 8348, 0}, + /*II_V_66_0F_38_97*/ {{0x1ae, 8364}, 0x7, 90, 0, 8380, 0}, + /*II_V_66_0F_38_98*/ {{0x1ae, 8396}, 0x7, 90, 0, 8409, 0}, + /*II_V_66_0F_38_99*/ {{0x1af, 8422}, 0x6, 80, 0, 8435, 0}, + 
/*II_V_66_0F_38_9A*/ {{0x1ae, 8448}, 0x7, 90, 0, 8461, 0}, + /*II_V_66_0F_38_9B*/ {{0x1af, 8474}, 0x6, 80, 0, 8487, 0}, + /*II_V_66_0F_38_9C*/ {{0x1ae, 8500}, 0x7, 90, 0, 8514, 0}, + /*II_V_66_0F_38_9D*/ {{0x1af, 8528}, 0x6, 80, 0, 8542, 0}, + /*II_V_66_0F_38_9E*/ {{0x1ae, 8556}, 0x7, 90, 0, 8570, 0}, + /*II_V_66_0F_38_9F*/ {{0x1af, 8584}, 0x6, 80, 0, 8598, 0}, + /*II_V_66_0F_38_A6*/ {{0x1ae, 8612}, 0x7, 90, 0, 8628, 0}, + /*II_V_66_0F_38_A7*/ {{0x1ae, 8644}, 0x7, 90, 0, 8660, 0}, + /*II_V_66_0F_38_A8*/ {{0x1ae, 8676}, 0x7, 90, 0, 8689, 0}, + /*II_V_66_0F_38_A9*/ {{0x1af, 8702}, 0x6, 80, 0, 8715, 0}, + /*II_V_66_0F_38_AA*/ {{0x1ae, 8728}, 0x7, 90, 0, 8741, 0}, + /*II_V_66_0F_38_AB*/ {{0x1af, 8754}, 0x6, 80, 0, 8767, 0}, + /*II_V_66_0F_38_AC*/ {{0x1ae, 8780}, 0x7, 90, 0, 8794, 0}, + /*II_V_66_0F_38_AD*/ {{0x1af, 8808}, 0x6, 80, 0, 8822, 0}, + /*II_V_66_0F_38_AE*/ {{0x1ae, 8836}, 0x7, 90, 0, 8850, 0}, + /*II_V_66_0F_38_AF*/ {{0x1af, 8864}, 0x6, 80, 0, 8878, 0}, + /*II_V_66_0F_38_B6*/ {{0x1ae, 8892}, 0x7, 90, 0, 8908, 0}, + /*II_V_66_0F_38_B7*/ {{0x1ae, 8924}, 0x7, 90, 0, 8940, 0}, + /*II_V_66_0F_38_B8*/ {{0x1ae, 8956}, 0x7, 90, 0, 8969, 0}, + /*II_V_66_0F_38_B9*/ {{0x1af, 8982}, 0x6, 80, 0, 8995, 0}, + /*II_V_66_0F_38_BA*/ {{0x1ae, 9008}, 0x7, 90, 0, 9021, 0}, + /*II_V_66_0F_38_BB*/ {{0x1af, 9034}, 0x6, 80, 0, 9047, 0}, + /*II_V_66_0F_38_BC*/ {{0x1ae, 9060}, 0x7, 90, 0, 9074, 0}, + /*II_V_66_0F_38_BD*/ {{0x1af, 9088}, 0x6, 80, 0, 9102, 0}, + /*II_V_66_0F_38_BE*/ {{0x1ae, 9116}, 0x7, 90, 0, 9130, 0}, + /*II_V_66_0F_38_BF*/ {{0x1af, 9144}, 0x6, 80, 0, 9158, 0}, + /*II_V_66_0F_38_DB*/ {{0x1b1, 9180}, 0x40, 0, 0, 0, 0}, + /*II_V_66_0F_38_DC*/ {{0x1b2, 9197}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_DD*/ {{0x1b2, 9218}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_DE*/ {{0x1b2, 9239}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_38_DF*/ {{0x1b2, 9260}, 0x0, 73, 0, 0, 0}, + /*II_V_66_0F_3A_04*/ {{0x1b7, 7581}, 0x41, 1, 0, 0, 0}, + /*II_V_66_0F_3A_05*/ {{0x1b7, 7592}, 0x41, 1, 0, 0, 0}, + 
/*II_V_66_0F_3A_06*/ {{0x1b8, 9287}, 0x10, 86, 1, 0, 0}, + /*II_66_0F_3A_08*/ {{0x19f, 9299}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_08*/ {{0x1b7, 9308}, 0x41, 1, 0, 0, 0}, + /*II_66_0F_3A_09*/ {{0x19f, 9318}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_09*/ {{0x1b7, 9327}, 0x41, 1, 0, 0, 0}, + /*II_66_0F_3A_0A*/ {{0x1b9, 9337}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_0A*/ {{0x181, 9346}, 0x0, 71, 1, 0, 0}, + /*II_66_0F_3A_0B*/ {{0x1ba, 9356}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_0B*/ {{0x181, 9365}, 0x0, 72, 1, 0, 0}, + /*II_66_0F_3A_0C*/ {{0x19f, 9375}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_0C*/ {{0x186, 9384}, 0x1, 90, 1, 0, 0}, + /*II_66_0F_3A_0D*/ {{0x19f, 9394}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_0D*/ {{0x186, 9403}, 0x1, 90, 1, 0, 0}, + /*II_66_0F_3A_0E*/ {{0x19f, 9413}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_0E*/ {{0x181, 9422}, 0x0, 73, 1, 0, 0}, + /*II_0F_3A_0F*/ {{0x1bb, 9432}, 0x0, 1, 0, 0, 0}, + /*II_66_0F_3A_0F*/ {{0x1bc, 9432}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_0F*/ {{0x181, 9441}, 0x0, 73, 1, 0, 0}, + /*II_66_0F_3A_14*/ {{0x1bd, 9451}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_14*/ {{0x1be, 9459}, 0x40, 1, 0, 0, 0}, + /*II_66_0F_3A_15*/ {{0x1bf, 6333}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_15*/ {{0x1c0, 6341}, 0x40, 1, 0, 0, 0}, + /*II_66_0F_3A_16*/ {{0x1c1, 9468}, 0x0, 1, 0, 0, 9476}, + /*II_V_66_0F_3A_16*/ {{0x1c2, 9484}, 0x46, 1, 0, 9493, 0}, + /*II_66_0F_3A_17*/ {{0x1c3, 9502}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_17*/ {{0x1c4, 9513}, 0x40, 1, 0, 0, 0}, + /*II_V_66_0F_3A_18*/ {{0x1b8, 9525}, 0x10, 73, 1, 0, 0}, + /*II_V_66_0F_3A_19*/ {{0x1c5, 9538}, 0x50, 1, 0, 0, 0}, + /*II_66_0F_3A_20*/ {{0x1c6, 9552}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_20*/ {{0x181, 9560}, 0x0, 76, 1, 0, 0}, + /*II_66_0F_3A_21*/ {{0x1b9, 9569}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_21*/ {{0x181, 9579}, 0x0, 71, 1, 0, 0}, + /*II_66_0F_3A_22*/ {{0x1c7, 9590}, 0x0, 1, 0, 0, 9598}, + /*II_V_66_0F_3A_22*/ {{0x181, 9606}, 0x6, 79, 1, 9615, 0}, + /*II_66_0F_3A_40*/ {{0x19f, 9624}, 0x0, 1, 0, 0, 0}, + 
/*II_V_66_0F_3A_40*/ {{0x186, 9630}, 0x1, 90, 1, 0, 0}, + /*II_66_0F_3A_41*/ {{0x19f, 9637}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_41*/ {{0x181, 9643}, 0x0, 73, 1, 0, 0}, + /*II_66_0F_3A_42*/ {{0x19f, 9650}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_42*/ {{0x181, 9659}, 0x0, 73, 1, 0, 0}, + /*II_66_0F_3A_44*/ {{0x1c8, 9669}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_44*/ {{0x1c9, 9680}, 0x0, 73, 1, 0, 0}, + /*II_V_66_0F_3A_4A*/ {{0x186, 9692}, 0x1, 90, 84, 0, 0}, + /*II_V_66_0F_3A_4B*/ {{0x186, 9703}, 0x1, 90, 84, 0, 0}, + /*II_V_66_0F_3A_4C*/ {{0x181, 9714}, 0x0, 73, 82, 0, 0}, + /*II_66_0F_3A_60*/ {{0x1ca, 9725}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_60*/ {{0x160, 9736}, 0x40, 1, 0, 0, 0}, + /*II_66_0F_3A_61*/ {{0x1ca, 9748}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_61*/ {{0x160, 9759}, 0x40, 1, 0, 0, 0}, + /*II_66_0F_3A_62*/ {{0x1ca, 9771}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_62*/ {{0x160, 9782}, 0x40, 1, 0, 0, 0}, + /*II_66_0F_3A_63*/ {{0x1ca, 9794}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_63*/ {{0x160, 9805}, 0x40, 1, 0, 0, 0}, + /*II_66_0F_3A_DF*/ {{0x1cb, 9817}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_3A_DF*/ {{0x1cc, 9834}, 0x40, 1, 0, 0, 0}, + /*II_V_66_0F_71_02*/ {{0x1cf, 6465}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_71_04*/ {{0x1cf, 6724}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_71_06*/ {{0x1cf, 7038}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_72_02*/ {{0x1cf, 6480}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_72_04*/ {{0x1cf, 6739}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_72_06*/ {{0x1cf, 7053}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_73_02*/ {{0x1cf, 6495}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_73_03*/ {{0x1cf, 9860}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_73_06*/ {{0x1cf, 7068}, 0x0, 1, 0, 0, 0}, + /*II_V_66_0F_73_07*/ {{0x1cf, 9877}, 0x0, 1, 0, 0, 0}, + /*II_0F_AE_00*/ {{0x170, 9886}, 0x0, 0, 0, 0, 9894}, + /*II_0F_AE_01*/ {{0x170, 9914}, 0x0, 0, 0, 0, 9923}, + /*II_V_0F_AE_02*/ {{0x1d2, 9963}, 0x40, 0, 0, 0, 0}, + /*II_V_0F_AE_03*/ {{0x1d2, 9992}, 0x40, 0, 0, 0, 0} +}; + +_InstNode InstructionsTree[5688] = { +/* 0 - _00 */ 0x2000, +/* 1 
- _01 */ 0x2001, +/* 2 - _02 */ 0x2002, +/* 3 - _03 */ 0x2003, +/* 4 - _04 */ 0x2004, +/* 5 - _05 */ 0x2005, +/* 6 - _06 */ 0x2006, +/* 7 - _07 */ 0x2007, +/* 8 - _08 */ 0x2008, +/* 9 - _09 */ 0x2009, +/* a - _0A */ 0x200a, +/* b - _0B */ 0x200b, +/* c - _0C */ 0x200c, +/* d - _0D */ 0x200d, +/* e - _0E */ 0x200e, +/* f - _0F */ 0x8100, +/* 10 - _10 */ 0x200f, +/* 11 - _11 */ 0x2010, +/* 12 - _12 */ 0x2011, +/* 13 - _13 */ 0x2012, +/* 14 - _14 */ 0x2013, +/* 15 - _15 */ 0x2014, +/* 16 - _16 */ 0x2015, +/* 17 - _17 */ 0x2016, +/* 18 - _18 */ 0x2017, +/* 19 - _19 */ 0x2018, +/* 1a - _1A */ 0x2019, +/* 1b - _1B */ 0x201a, +/* 1c - _1C */ 0x201b, +/* 1d - _1D */ 0x201c, +/* 1e - _1E */ 0x201d, +/* 1f - _1F */ 0x201e, +/* 20 - _20 */ 0x201f, +/* 21 - _21 */ 0x2020, +/* 22 - _22 */ 0x2021, +/* 23 - _23 */ 0x2022, +/* 24 - _24 */ 0x2023, +/* 25 - _25 */ 0x2024, +/* 26 - */ 0, +/* 27 - _27 */ 0x2025, +/* 28 - _28 */ 0x2026, +/* 29 - _29 */ 0x2027, +/* 2a - _2A */ 0x2028, +/* 2b - _2B */ 0x2029, +/* 2c - _2C */ 0x202a, +/* 2d - _2D */ 0x202b, +/* 2e - */ 0, +/* 2f - _2F */ 0x202c, +/* 30 - _30 */ 0x202d, +/* 31 - _31 */ 0x202e, +/* 32 - _32 */ 0x202f, +/* 33 - _33 */ 0x2030, +/* 34 - _34 */ 0x2031, +/* 35 - _35 */ 0x2032, +/* 36 - */ 0, +/* 37 - _37 */ 0x2033, +/* 38 - _38 */ 0x2034, +/* 39 - _39 */ 0x2035, +/* 3a - _3A */ 0x2036, +/* 3b - _3B */ 0x2037, +/* 3c - _3C */ 0x2038, +/* 3d - _3D */ 0x2039, +/* 3e - */ 0, +/* 3f - _3F */ 0x203a, +/* 40 - _40 */ 0x203b, +/* 41 - _40 */ 0x203c, +/* 42 - _40 */ 0x203d, +/* 43 - _40 */ 0x203e, +/* 44 - _40 */ 0x203f, +/* 45 - _40 */ 0x2040, +/* 46 - _40 */ 0x2041, +/* 47 - _40 */ 0x2042, +/* 48 - _48 */ 0x2043, +/* 49 - _48 */ 0x2044, +/* 4a - _48 */ 0x2045, +/* 4b - _48 */ 0x2046, +/* 4c - _48 */ 0x2047, +/* 4d - _48 */ 0x2048, +/* 4e - _48 */ 0x2049, +/* 4f - _48 */ 0x204a, +/* 50 - _50 */ 0x204b, +/* 51 - _50 */ 0x204c, +/* 52 - _50 */ 0x204d, +/* 53 - _50 */ 0x204e, +/* 54 - _50 */ 0x204f, +/* 55 - _50 */ 0x2050, +/* 56 - _50 */ 
0x2051, +/* 57 - _50 */ 0x2052, +/* 58 - _58 */ 0x2053, +/* 59 - _58 */ 0x2054, +/* 5a - _58 */ 0x2055, +/* 5b - _58 */ 0x2056, +/* 5c - _58 */ 0x2057, +/* 5d - _58 */ 0x2058, +/* 5e - _58 */ 0x2059, +/* 5f - _58 */ 0x205a, +/* 60 - _60 */ 0x205b, +/* 61 - _61 */ 0x205c, +/* 62 - _62 */ 0x205d, +/* 63 - _63 */ 0x205e, +/* 64 - */ 0, +/* 65 - */ 0, +/* 66 - */ 0, +/* 67 - */ 0, +/* 68 - _68 */ 0x205f, +/* 69 - _69 */ 0x4000, +/* 6a - _6A */ 0x2060, +/* 6b - _6B */ 0x4001, +/* 6c - _6C */ 0x2061, +/* 6d - _6D */ 0x2062, +/* 6e - _6E */ 0x2063, +/* 6f - _6F */ 0x2064, +/* 70 - _70 */ 0x2065, +/* 71 - _71 */ 0x2066, +/* 72 - _72 */ 0x2067, +/* 73 - _73 */ 0x2068, +/* 74 - _74 */ 0x2069, +/* 75 - _75 */ 0x206a, +/* 76 - _76 */ 0x206b, +/* 77 - _77 */ 0x206c, +/* 78 - _78 */ 0x206d, +/* 79 - _79 */ 0x206e, +/* 7a - _7A */ 0x206f, +/* 7b - _7B */ 0x2070, +/* 7c - _7C */ 0x2071, +/* 7d - _7D */ 0x2072, +/* 7e - _7E */ 0x2073, +/* 7f - _7F */ 0x2074, +/* 80 - _80 */ 0x6200, +/* 81 - _81 */ 0x6208, +/* 82 - _82 */ 0x6210, +/* 83 - _83 */ 0x6218, +/* 84 - _84 */ 0x2075, +/* 85 - _85 */ 0x2076, +/* 86 - _86 */ 0x2077, +/* 87 - _87 */ 0x2078, +/* 88 - _88 */ 0x2079, +/* 89 - _89 */ 0x207a, +/* 8a - _8A */ 0x207b, +/* 8b - _8B */ 0x207c, +/* 8c - _8C */ 0x207d, +/* 8d - _8D */ 0x207e, +/* 8e - _8E */ 0x207f, +/* 8f - _8F */ 0x6220, +/* 90 - _90 */ 0x2080, +/* 91 - _91 */ 0x2081, +/* 92 - _92 */ 0x2082, +/* 93 - _93 */ 0x2083, +/* 94 - _94 */ 0x2084, +/* 95 - _95 */ 0x2085, +/* 96 - _96 */ 0x2086, +/* 97 - _97 */ 0x2087, +/* 98 - _98 */ 0x4002, +/* 99 - _99 */ 0x4003, +/* 9a - _9A */ 0x2088, +/* 9b - */ 0, +/* 9c - _9C */ 0x2089, +/* 9d - _9D */ 0x208a, +/* 9e - _9E */ 0x208b, +/* 9f - _9F */ 0x208c, +/* a0 - _A0 */ 0x208d, +/* a1 - _A1 */ 0x208e, +/* a2 - _A2 */ 0x208f, +/* a3 - _A3 */ 0x2090, +/* a4 - _A4 */ 0x2091, +/* a5 - _A5 */ 0x2092, +/* a6 - _A6 */ 0x2093, +/* a7 - _A7 */ 0x2094, +/* a8 - _A8 */ 0x2095, +/* a9 - _A9 */ 0x2096, +/* aa - _AA */ 0x2097, +/* ab - _AB */ 
0x2098, +/* ac - _AC */ 0x2099, +/* ad - _AD */ 0x209a, +/* ae - _AE */ 0x209b, +/* af - _AF */ 0x209c, +/* b0 - _B0 */ 0x209d, +/* b1 - _B0 */ 0x209e, +/* b2 - _B0 */ 0x209f, +/* b3 - _B0 */ 0x20a0, +/* b4 - _B0 */ 0x20a1, +/* b5 - _B0 */ 0x20a2, +/* b6 - _B0 */ 0x20a3, +/* b7 - _B0 */ 0x20a4, +/* b8 - _B8 */ 0x20a5, +/* b9 - _B8 */ 0x20a6, +/* ba - _B8 */ 0x20a7, +/* bb - _B8 */ 0x20a8, +/* bc - _B8 */ 0x20a9, +/* bd - _B8 */ 0x20aa, +/* be - _B8 */ 0x20ab, +/* bf - _B8 */ 0x20ac, +/* c0 - _C0 */ 0x6228, +/* c1 - _C1 */ 0x6230, +/* c2 - _C2 */ 0x20ad, +/* c3 - _C3 */ 0x20ae, +/* c4 - _C4 */ 0x20af, +/* c5 - _C5 */ 0x20b0, +/* c6 - _C6 */ 0xa238, +/* c7 - _C7 */ 0xa280, +/* c8 - _C8 */ 0x20b1, +/* c9 - _C9 */ 0x20b2, +/* ca - _CA */ 0x20b3, +/* cb - _CB */ 0x20b4, +/* cc - _CC */ 0x20b5, +/* cd - _CD */ 0x20b6, +/* ce - _CE */ 0x20b7, +/* cf - _CF */ 0x20b8, +/* d0 - _D0 */ 0x62c8, +/* d1 - _D1 */ 0x62d0, +/* d2 - _D2 */ 0x62d8, +/* d3 - _D3 */ 0x62e0, +/* d4 - _D4 */ 0x20b9, +/* d5 - _D5 */ 0x20ba, +/* d6 - _D6 */ 0x20bb, +/* d7 - _D7 */ 0x20bc, +/* d8 - _D8 */ 0xa2e8, +/* d9 - _D9 */ 0xa330, +/* da - _DA */ 0xa378, +/* db - _DB */ 0xa3c0, +/* dc - _DC */ 0xa408, +/* dd - _DD */ 0xa450, +/* de - _DE */ 0xa498, +/* df - _DF */ 0xa4e0, +/* e0 - _E0 */ 0x20bd, +/* e1 - _E1 */ 0x20be, +/* e2 - _E2 */ 0x20bf, +/* e3 - _E3 */ 0x4004, +/* e4 - _E4 */ 0x20c0, +/* e5 - _E5 */ 0x20c1, +/* e6 - _E6 */ 0x20c2, +/* e7 - _E7 */ 0x20c3, +/* e8 - _E8 */ 0x20c4, +/* e9 - _E9 */ 0x20c5, +/* ea - _EA */ 0x20c6, +/* eb - _EB */ 0x20c7, +/* ec - _EC */ 0x20c8, +/* ed - _ED */ 0x20c9, +/* ee - _EE */ 0x20ca, +/* ef - _EF */ 0x20cb, +/* f0 - */ 0, +/* f1 - _F1 */ 0x20cc, +/* f2 - */ 0, +/* f3 - */ 0, +/* f4 - _F4 */ 0x20cd, +/* f5 - _F5 */ 0x20ce, +/* f6 - _F6 */ 0x6528, +/* f7 - _F7 */ 0x6530, +/* f8 - _F8 */ 0x20cf, +/* f9 - _F9 */ 0x20d0, +/* fa - _FA */ 0x20d1, +/* fb - _FB */ 0x20d2, +/* fc - _FC */ 0x20d3, +/* fd - _FD */ 0x20d4, +/* fe - _FE */ 0x6538, +/* ff - _FF */ 0x6540, 
+/* 100 - _0F_00 */ 0x6548, +/* 101 - _0F_01 */ 0xa550, +/* 102 - _0F_02 */ 0x20d5, +/* 103 - _0F_03 */ 0x20d6, +/* 104 - */ 0, +/* 105 - _0F_05 */ 0x20d7, +/* 106 - _0F_06 */ 0x20d8, +/* 107 - _0F_07 */ 0x20d9, +/* 108 - _0F_08 */ 0x20da, +/* 109 - _0F_09 */ 0x20db, +/* 10a - */ 0, +/* 10b - _0F_0B */ 0x20dc, +/* 10c - */ 0, +/* 10d - _0F_0D */ 0x6598, +/* 10e - _0F_0E */ 0x20dd, +/* 10f - _0F_0F */ 0x85a0, +/* 110 - _0F_10 */ 0xc6a0, +/* 111 - _0F_11 */ 0xc6ac, +/* 112 - _0F_12 */ 0xc6b8, +/* 113 - _0F_13 */ 0xc6c4, +/* 114 - _0F_14 */ 0xc6d0, +/* 115 - _0F_15 */ 0xc6dc, +/* 116 - _0F_16 */ 0xc6e8, +/* 117 - _0F_17 */ 0xc6f4, +/* 118 - _0F_18 */ 0x6700, +/* 119 - */ 0, +/* 11a - */ 0, +/* 11b - */ 0, +/* 11c - */ 0, +/* 11d - */ 0, +/* 11e - */ 0, +/* 11f - _0F_1F */ 0x20de, +/* 120 - _0F_20 */ 0x20df, +/* 121 - _0F_21 */ 0x20e0, +/* 122 - _0F_22 */ 0x20e1, +/* 123 - _0F_23 */ 0x20e2, +/* 124 - */ 0, +/* 125 - */ 0, +/* 126 - */ 0, +/* 127 - */ 0, +/* 128 - _0F_28 */ 0xc708, +/* 129 - _0F_29 */ 0xc714, +/* 12a - _0F_2A */ 0xc720, +/* 12b - _0F_2B */ 0xc72c, +/* 12c - _0F_2C */ 0xc738, +/* 12d - _0F_2D */ 0xc744, +/* 12e - _0F_2E */ 0xc750, +/* 12f - _0F_2F */ 0xc75c, +/* 130 - _0F_30 */ 0x20e3, +/* 131 - _0F_31 */ 0x20e4, +/* 132 - _0F_32 */ 0x20e5, +/* 133 - _0F_33 */ 0x20e6, +/* 134 - _0F_34 */ 0x20e7, +/* 135 - _0F_35 */ 0x20e8, +/* 136 - */ 0, +/* 137 - _0F_37 */ 0x20e9, +/* 138 - _0F_38 */ 0x8768, +/* 139 - */ 0, +/* 13a - _0F_3A */ 0x8868, +/* 13b - */ 0, +/* 13c - */ 0, +/* 13d - */ 0, +/* 13e - */ 0, +/* 13f - */ 0, +/* 140 - _0F_40 */ 0x20ea, +/* 141 - _0F_41 */ 0x20eb, +/* 142 - _0F_42 */ 0x20ec, +/* 143 - _0F_43 */ 0x20ed, +/* 144 - _0F_44 */ 0x20ee, +/* 145 - _0F_45 */ 0x20ef, +/* 146 - _0F_46 */ 0x20f0, +/* 147 - _0F_47 */ 0x20f1, +/* 148 - _0F_48 */ 0x20f2, +/* 149 - _0F_49 */ 0x20f3, +/* 14a - _0F_4A */ 0x20f4, +/* 14b - _0F_4B */ 0x20f5, +/* 14c - _0F_4C */ 0x20f6, +/* 14d - _0F_4D */ 0x20f7, +/* 14e - _0F_4E */ 0x20f8, +/* 14f - _0F_4F */ 0x20f9, 
+/* 150 - _0F_50 */ 0xc968, +/* 151 - _0F_51 */ 0xc974, +/* 152 - _0F_52 */ 0xc980, +/* 153 - _0F_53 */ 0xc98c, +/* 154 - _0F_54 */ 0xc998, +/* 155 - _0F_55 */ 0xc9a4, +/* 156 - _0F_56 */ 0xc9b0, +/* 157 - _0F_57 */ 0xc9bc, +/* 158 - _0F_58 */ 0xc9c8, +/* 159 - _0F_59 */ 0xc9d4, +/* 15a - _0F_5A */ 0xc9e0, +/* 15b - _0F_5B */ 0xc9ec, +/* 15c - _0F_5C */ 0xc9f8, +/* 15d - _0F_5D */ 0xca04, +/* 15e - _0F_5E */ 0xca10, +/* 15f - _0F_5F */ 0xca1c, +/* 160 - _0F_60 */ 0xca28, +/* 161 - _0F_61 */ 0xca34, +/* 162 - _0F_62 */ 0xca40, +/* 163 - _0F_63 */ 0xca4c, +/* 164 - _0F_64 */ 0xca58, +/* 165 - _0F_65 */ 0xca64, +/* 166 - _0F_66 */ 0xca70, +/* 167 - _0F_67 */ 0xca7c, +/* 168 - _0F_68 */ 0xca88, +/* 169 - _0F_69 */ 0xca94, +/* 16a - _0F_6A */ 0xcaa0, +/* 16b - _0F_6B */ 0xcaac, +/* 16c - _0F_6C */ 0xcab8, +/* 16d - _0F_6D */ 0xcac4, +/* 16e - _0F_6E */ 0xcad0, +/* 16f - _0F_6F */ 0xcadc, +/* 170 - _0F_70 */ 0xcae8, +/* 171 - _0F_71 */ 0x6af4, +/* 172 - _0F_72 */ 0x6afc, +/* 173 - _0F_73 */ 0x6b04, +/* 174 - _0F_74 */ 0xcb0c, +/* 175 - _0F_75 */ 0xcb18, +/* 176 - _0F_76 */ 0xcb24, +/* 177 - _0F_77 */ 0xcb30, +/* 178 - _0F_78 */ 0xcb3c, +/* 179 - _0F_79 */ 0xcb48, +/* 17a - _0F_7A */ 0x8b54, +/* 17b - */ 0, +/* 17c - _0F_7C */ 0xcc54, +/* 17d - _0F_7D */ 0xcc60, +/* 17e - _0F_7E */ 0xcc6c, +/* 17f - _0F_7F */ 0xcc78, +/* 180 - _0F_80 */ 0x20fa, +/* 181 - _0F_81 */ 0x20fb, +/* 182 - _0F_82 */ 0x20fc, +/* 183 - _0F_83 */ 0x20fd, +/* 184 - _0F_84 */ 0x20fe, +/* 185 - _0F_85 */ 0x20ff, +/* 186 - _0F_86 */ 0x2100, +/* 187 - _0F_87 */ 0x2101, +/* 188 - _0F_88 */ 0x2102, +/* 189 - _0F_89 */ 0x2103, +/* 18a - _0F_8A */ 0x2104, +/* 18b - _0F_8B */ 0x2105, +/* 18c - _0F_8C */ 0x2106, +/* 18d - _0F_8D */ 0x2107, +/* 18e - _0F_8E */ 0x2108, +/* 18f - _0F_8F */ 0x2109, +/* 190 - _0F_90 */ 0x210a, +/* 191 - _0F_91 */ 0x210b, +/* 192 - _0F_92 */ 0x210c, +/* 193 - _0F_93 */ 0x210d, +/* 194 - _0F_94 */ 0x210e, +/* 195 - _0F_95 */ 0x210f, +/* 196 - _0F_96 */ 0x2110, +/* 197 - _0F_97 */ 
0x2111, +/* 198 - _0F_98 */ 0x2112, +/* 199 - _0F_99 */ 0x2113, +/* 19a - _0F_9A */ 0x2114, +/* 19b - _0F_9B */ 0x2115, +/* 19c - _0F_9C */ 0x2116, +/* 19d - _0F_9D */ 0x2117, +/* 19e - _0F_9E */ 0x2118, +/* 19f - _0F_9F */ 0x2119, +/* 1a0 - _0F_A0 */ 0x211a, +/* 1a1 - _0F_A1 */ 0x211b, +/* 1a2 - _0F_A2 */ 0x211c, +/* 1a3 - _0F_A3 */ 0x211d, +/* 1a4 - _0F_A4 */ 0x4005, +/* 1a5 - _0F_A5 */ 0x4006, +/* 1a6 - */ 0, +/* 1a7 - */ 0, +/* 1a8 - _0F_A8 */ 0x211e, +/* 1a9 - _0F_A9 */ 0x211f, +/* 1aa - _0F_AA */ 0x2120, +/* 1ab - _0F_AB */ 0x2121, +/* 1ac - _0F_AC */ 0x4007, +/* 1ad - _0F_AD */ 0x4008, +/* 1ae - _0F_AE */ 0x6c84, +/* 1af - _0F_AF */ 0x2122, +/* 1b0 - _0F_B0 */ 0x2123, +/* 1b1 - _0F_B1 */ 0x2124, +/* 1b2 - _0F_B2 */ 0x2125, +/* 1b3 - _0F_B3 */ 0x2126, +/* 1b4 - _0F_B4 */ 0x2127, +/* 1b5 - _0F_B5 */ 0x2128, +/* 1b6 - _0F_B6 */ 0x2129, +/* 1b7 - _0F_B7 */ 0x212a, +/* 1b8 - _0F_B8 */ 0xcc8c, +/* 1b9 - _0F_B9 */ 0x212b, +/* 1ba - _0F_BA */ 0x6c98, +/* 1bb - _0F_BB */ 0x212c, +/* 1bc - _0F_BC */ 0xcca0, +/* 1bd - _0F_BD */ 0xccac, +/* 1be - _0F_BE */ 0x212d, +/* 1bf - _0F_BF */ 0x212e, +/* 1c0 - _0F_C0 */ 0x212f, +/* 1c1 - _0F_C1 */ 0x2130, +/* 1c2 - _0F_C2 */ 0xccb8, +/* 1c3 - _0F_C3 */ 0x2131, +/* 1c4 - _0F_C4 */ 0xccc4, +/* 1c5 - _0F_C5 */ 0xccd0, +/* 1c6 - _0F_C6 */ 0xccdc, +/* 1c7 - _0F_C7 */ 0x6ce8, +/* 1c8 - _0F_C8 */ 0x2132, +/* 1c9 - _0F_C8 */ 0x2133, +/* 1ca - _0F_C8 */ 0x2134, +/* 1cb - _0F_C8 */ 0x2135, +/* 1cc - _0F_C8 */ 0x2136, +/* 1cd - _0F_C8 */ 0x2137, +/* 1ce - _0F_C8 */ 0x2138, +/* 1cf - _0F_C8 */ 0x2139, +/* 1d0 - _0F_D0 */ 0xccf0, +/* 1d1 - _0F_D1 */ 0xccfc, +/* 1d2 - _0F_D2 */ 0xcd08, +/* 1d3 - _0F_D3 */ 0xcd14, +/* 1d4 - _0F_D4 */ 0xcd20, +/* 1d5 - _0F_D5 */ 0xcd2c, +/* 1d6 - _0F_D6 */ 0xcd38, +/* 1d7 - _0F_D7 */ 0xcd44, +/* 1d8 - _0F_D8 */ 0xcd50, +/* 1d9 - _0F_D9 */ 0xcd5c, +/* 1da - _0F_DA */ 0xcd68, +/* 1db - _0F_DB */ 0xcd74, +/* 1dc - _0F_DC */ 0xcd80, +/* 1dd - _0F_DD */ 0xcd8c, +/* 1de - _0F_DE */ 0xcd98, +/* 1df - _0F_DF */ 0xcda4, 
+/* 1e0 - _0F_E0 */ 0xcdb0, +/* 1e1 - _0F_E1 */ 0xcdbc, +/* 1e2 - _0F_E2 */ 0xcdc8, +/* 1e3 - _0F_E3 */ 0xcdd4, +/* 1e4 - _0F_E4 */ 0xcde0, +/* 1e5 - _0F_E5 */ 0xcdec, +/* 1e6 - _0F_E6 */ 0xcdf8, +/* 1e7 - _0F_E7 */ 0xce04, +/* 1e8 - _0F_E8 */ 0xce10, +/* 1e9 - _0F_E9 */ 0xce1c, +/* 1ea - _0F_EA */ 0xce28, +/* 1eb - _0F_EB */ 0xce34, +/* 1ec - _0F_EC */ 0xce40, +/* 1ed - _0F_ED */ 0xce4c, +/* 1ee - _0F_EE */ 0xce58, +/* 1ef - _0F_EF */ 0xce64, +/* 1f0 - _0F_F0 */ 0xce70, +/* 1f1 - _0F_F1 */ 0xce7c, +/* 1f2 - _0F_F2 */ 0xce88, +/* 1f3 - _0F_F3 */ 0xce94, +/* 1f4 - _0F_F4 */ 0xcea0, +/* 1f5 - _0F_F5 */ 0xceac, +/* 1f6 - _0F_F6 */ 0xceb8, +/* 1f7 - _0F_F7 */ 0xcec4, +/* 1f8 - _0F_F8 */ 0xced0, +/* 1f9 - _0F_F9 */ 0xcedc, +/* 1fa - _0F_FA */ 0xcee8, +/* 1fb - _0F_FB */ 0xcef4, +/* 1fc - _0F_FC */ 0xcf00, +/* 1fd - _0F_FD */ 0xcf0c, +/* 1fe - _0F_FE */ 0xcf18, +/* 1ff - */ 0, +/* 200 - _80_00 */ 0x213a, +/* 201 - _80_01 */ 0x213b, +/* 202 - _80_02 */ 0x213c, +/* 203 - _80_03 */ 0x213d, +/* 204 - _80_04 */ 0x213e, +/* 205 - _80_05 */ 0x213f, +/* 206 - _80_06 */ 0x2140, +/* 207 - _80_07 */ 0x2141, +/* 208 - _81_00 */ 0x2142, +/* 209 - _81_01 */ 0x2143, +/* 20a - _81_02 */ 0x2144, +/* 20b - _81_03 */ 0x2145, +/* 20c - _81_04 */ 0x2146, +/* 20d - _81_05 */ 0x2147, +/* 20e - _81_06 */ 0x2148, +/* 20f - _81_07 */ 0x2149, +/* 210 - _82_00 */ 0x214a, +/* 211 - _82_01 */ 0x214b, +/* 212 - _82_02 */ 0x214c, +/* 213 - _82_03 */ 0x214d, +/* 214 - _82_04 */ 0x214e, +/* 215 - _82_05 */ 0x214f, +/* 216 - _82_06 */ 0x2150, +/* 217 - _82_07 */ 0x2151, +/* 218 - _83_00 */ 0x2152, +/* 219 - _83_01 */ 0x2153, +/* 21a - _83_02 */ 0x2154, +/* 21b - _83_03 */ 0x2155, +/* 21c - _83_04 */ 0x2156, +/* 21d - _83_05 */ 0x2157, +/* 21e - _83_06 */ 0x2158, +/* 21f - _83_07 */ 0x2159, +/* 220 - _8F_00 */ 0x215a, +/* 221 - */ 0, +/* 222 - */ 0, +/* 223 - */ 0, +/* 224 - */ 0, +/* 225 - */ 0, +/* 226 - */ 0, +/* 227 - */ 0, +/* 228 - _C0_00 */ 0x215b, +/* 229 - _C0_01 */ 0x215c, +/* 22a - _C0_02 */ 
0x215d, +/* 22b - _C0_03 */ 0x215e, +/* 22c - _C0_04 */ 0x215f, +/* 22d - _C0_05 */ 0x2160, +/* 22e - _C0_06 */ 0x2161, +/* 22f - _C0_07 */ 0x2162, +/* 230 - _C1_00 */ 0x2163, +/* 231 - _C1_01 */ 0x2164, +/* 232 - _C1_02 */ 0x2165, +/* 233 - _C1_03 */ 0x2166, +/* 234 - _C1_04 */ 0x2167, +/* 235 - _C1_05 */ 0x2168, +/* 236 - _C1_06 */ 0x2169, +/* 237 - _C1_07 */ 0x216a, +/* 238 - _C6_00 */ 0x216b, +/* 239 - */ 0, +/* 23a - */ 0, +/* 23b - */ 0, +/* 23c - */ 0, +/* 23d - */ 0, +/* 23e - */ 0, +/* 23f - */ 0, +/* 240 - */ 0, +/* 241 - */ 0, +/* 242 - */ 0, +/* 243 - */ 0, +/* 244 - */ 0, +/* 245 - */ 0, +/* 246 - */ 0, +/* 247 - */ 0, +/* 248 - */ 0, +/* 249 - */ 0, +/* 24a - */ 0, +/* 24b - */ 0, +/* 24c - */ 0, +/* 24d - */ 0, +/* 24e - */ 0, +/* 24f - */ 0, +/* 250 - */ 0, +/* 251 - */ 0, +/* 252 - */ 0, +/* 253 - */ 0, +/* 254 - */ 0, +/* 255 - */ 0, +/* 256 - */ 0, +/* 257 - */ 0, +/* 258 - */ 0, +/* 259 - */ 0, +/* 25a - */ 0, +/* 25b - */ 0, +/* 25c - */ 0, +/* 25d - */ 0, +/* 25e - */ 0, +/* 25f - */ 0, +/* 260 - */ 0, +/* 261 - */ 0, +/* 262 - */ 0, +/* 263 - */ 0, +/* 264 - */ 0, +/* 265 - */ 0, +/* 266 - */ 0, +/* 267 - */ 0, +/* 268 - */ 0, +/* 269 - */ 0, +/* 26a - */ 0, +/* 26b - */ 0, +/* 26c - */ 0, +/* 26d - */ 0, +/* 26e - */ 0, +/* 26f - */ 0, +/* 270 - */ 0, +/* 271 - */ 0, +/* 272 - */ 0, +/* 273 - */ 0, +/* 274 - */ 0, +/* 275 - */ 0, +/* 276 - */ 0, +/* 277 - */ 0, +/* 278 - _C6_F8 */ 0x216c, +/* 279 - */ 0, +/* 27a - */ 0, +/* 27b - */ 0, +/* 27c - */ 0, +/* 27d - */ 0, +/* 27e - */ 0, +/* 27f - */ 0, +/* 280 - _C7_00 */ 0x216d, +/* 281 - */ 0, +/* 282 - */ 0, +/* 283 - */ 0, +/* 284 - */ 0, +/* 285 - */ 0, +/* 286 - */ 0, +/* 287 - */ 0, +/* 288 - */ 0, +/* 289 - */ 0, +/* 28a - */ 0, +/* 28b - */ 0, +/* 28c - */ 0, +/* 28d - */ 0, +/* 28e - */ 0, +/* 28f - */ 0, +/* 290 - */ 0, +/* 291 - */ 0, +/* 292 - */ 0, +/* 293 - */ 0, +/* 294 - */ 0, +/* 295 - */ 0, +/* 296 - */ 0, +/* 297 - */ 0, +/* 298 - */ 0, +/* 299 - */ 0, +/* 29a - */ 0, +/* 29b 
- */ 0, +/* 29c - */ 0, +/* 29d - */ 0, +/* 29e - */ 0, +/* 29f - */ 0, +/* 2a0 - */ 0, +/* 2a1 - */ 0, +/* 2a2 - */ 0, +/* 2a3 - */ 0, +/* 2a4 - */ 0, +/* 2a5 - */ 0, +/* 2a6 - */ 0, +/* 2a7 - */ 0, +/* 2a8 - */ 0, +/* 2a9 - */ 0, +/* 2aa - */ 0, +/* 2ab - */ 0, +/* 2ac - */ 0, +/* 2ad - */ 0, +/* 2ae - */ 0, +/* 2af - */ 0, +/* 2b0 - */ 0, +/* 2b1 - */ 0, +/* 2b2 - */ 0, +/* 2b3 - */ 0, +/* 2b4 - */ 0, +/* 2b5 - */ 0, +/* 2b6 - */ 0, +/* 2b7 - */ 0, +/* 2b8 - */ 0, +/* 2b9 - */ 0, +/* 2ba - */ 0, +/* 2bb - */ 0, +/* 2bc - */ 0, +/* 2bd - */ 0, +/* 2be - */ 0, +/* 2bf - */ 0, +/* 2c0 - _C7_F8 */ 0x216e, +/* 2c1 - */ 0, +/* 2c2 - */ 0, +/* 2c3 - */ 0, +/* 2c4 - */ 0, +/* 2c5 - */ 0, +/* 2c6 - */ 0, +/* 2c7 - */ 0, +/* 2c8 - _D0_00 */ 0x216f, +/* 2c9 - _D0_01 */ 0x2170, +/* 2ca - _D0_02 */ 0x2171, +/* 2cb - _D0_03 */ 0x2172, +/* 2cc - _D0_04 */ 0x2173, +/* 2cd - _D0_05 */ 0x2174, +/* 2ce - _D0_06 */ 0x2175, +/* 2cf - _D0_07 */ 0x2176, +/* 2d0 - _D1_00 */ 0x2177, +/* 2d1 - _D1_01 */ 0x2178, +/* 2d2 - _D1_02 */ 0x2179, +/* 2d3 - _D1_03 */ 0x217a, +/* 2d4 - _D1_04 */ 0x217b, +/* 2d5 - _D1_05 */ 0x217c, +/* 2d6 - _D1_06 */ 0x217d, +/* 2d7 - _D1_07 */ 0x217e, +/* 2d8 - _D2_00 */ 0x217f, +/* 2d9 - _D2_01 */ 0x2180, +/* 2da - _D2_02 */ 0x2181, +/* 2db - _D2_03 */ 0x2182, +/* 2dc - _D2_04 */ 0x2183, +/* 2dd - _D2_05 */ 0x2184, +/* 2de - _D2_06 */ 0x2185, +/* 2df - _D2_07 */ 0x2186, +/* 2e0 - _D3_00 */ 0x2187, +/* 2e1 - _D3_01 */ 0x2188, +/* 2e2 - _D3_02 */ 0x2189, +/* 2e3 - _D3_03 */ 0x218a, +/* 2e4 - _D3_04 */ 0x218b, +/* 2e5 - _D3_05 */ 0x218c, +/* 2e6 - _D3_06 */ 0x218d, +/* 2e7 - _D3_07 */ 0x218e, +/* 2e8 - _D8_00 */ 0x218f, +/* 2e9 - _D8_01 */ 0x2190, +/* 2ea - _D8_02 */ 0x2191, +/* 2eb - _D8_03 */ 0x2192, +/* 2ec - _D8_04 */ 0x2193, +/* 2ed - _D8_05 */ 0x2194, +/* 2ee - _D8_06 */ 0x2195, +/* 2ef - _D8_07 */ 0x2196, +/* 2f0 - _D8_C0 */ 0x2197, +/* 2f1 - _D8_C0 */ 0x2198, +/* 2f2 - _D8_C0 */ 0x2199, +/* 2f3 - _D8_C0 */ 0x219a, +/* 2f4 - _D8_C0 */ 0x219b, +/* 2f5 - 
_D8_C0 */ 0x219c, +/* 2f6 - _D8_C0 */ 0x219d, +/* 2f7 - _D8_C0 */ 0x219e, +/* 2f8 - _D8_C8 */ 0x219f, +/* 2f9 - _D8_C8 */ 0x21a0, +/* 2fa - _D8_C8 */ 0x21a1, +/* 2fb - _D8_C8 */ 0x21a2, +/* 2fc - _D8_C8 */ 0x21a3, +/* 2fd - _D8_C8 */ 0x21a4, +/* 2fe - _D8_C8 */ 0x21a5, +/* 2ff - _D8_C8 */ 0x21a6, +/* 300 - _D8_D0 */ 0x21a7, +/* 301 - _D8_D0 */ 0x21a8, +/* 302 - _D8_D0 */ 0x21a9, +/* 303 - _D8_D0 */ 0x21aa, +/* 304 - _D8_D0 */ 0x21ab, +/* 305 - _D8_D0 */ 0x21ac, +/* 306 - _D8_D0 */ 0x21ad, +/* 307 - _D8_D0 */ 0x21ae, +/* 308 - _D8_D8 */ 0x21af, +/* 309 - _D8_D9 */ 0x21b0, +/* 30a - _D8_D8 */ 0x21b1, +/* 30b - _D8_D8 */ 0x21b2, +/* 30c - _D8_D8 */ 0x21b3, +/* 30d - _D8_D8 */ 0x21b4, +/* 30e - _D8_D8 */ 0x21b5, +/* 30f - _D8_D8 */ 0x21b6, +/* 310 - _D8_E0 */ 0x21b7, +/* 311 - _D8_E0 */ 0x21b8, +/* 312 - _D8_E0 */ 0x21b9, +/* 313 - _D8_E0 */ 0x21ba, +/* 314 - _D8_E0 */ 0x21bb, +/* 315 - _D8_E0 */ 0x21bc, +/* 316 - _D8_E0 */ 0x21bd, +/* 317 - _D8_E0 */ 0x21be, +/* 318 - _D8_E8 */ 0x21bf, +/* 319 - _D8_E8 */ 0x21c0, +/* 31a - _D8_E8 */ 0x21c1, +/* 31b - _D8_E8 */ 0x21c2, +/* 31c - _D8_E8 */ 0x21c3, +/* 31d - _D8_E8 */ 0x21c4, +/* 31e - _D8_E8 */ 0x21c5, +/* 31f - _D8_E8 */ 0x21c6, +/* 320 - _D8_F0 */ 0x21c7, +/* 321 - _D8_F0 */ 0x21c8, +/* 322 - _D8_F0 */ 0x21c9, +/* 323 - _D8_F0 */ 0x21ca, +/* 324 - _D8_F0 */ 0x21cb, +/* 325 - _D8_F0 */ 0x21cc, +/* 326 - _D8_F0 */ 0x21cd, +/* 327 - _D8_F0 */ 0x21ce, +/* 328 - _D8_F8 */ 0x21cf, +/* 329 - _D8_F8 */ 0x21d0, +/* 32a - _D8_F8 */ 0x21d1, +/* 32b - _D8_F8 */ 0x21d2, +/* 32c - _D8_F8 */ 0x21d3, +/* 32d - _D8_F8 */ 0x21d4, +/* 32e - _D8_F8 */ 0x21d5, +/* 32f - _D8_F8 */ 0x21d6, +/* 330 - _D9_00 */ 0x21d7, +/* 331 - */ 0, +/* 332 - _D9_02 */ 0x21d8, +/* 333 - _D9_03 */ 0x21d9, +/* 334 - _D9_04 */ 0x21da, +/* 335 - _D9_05 */ 0x21db, +/* 336 - _D9_06 */ 0xcf24, +/* 337 - _D9_07 */ 0xcf30, +/* 338 - _D9_C0 */ 0x21dc, +/* 339 - _D9_C0 */ 0x21dd, +/* 33a - _D9_C0 */ 0x21de, +/* 33b - _D9_C0 */ 0x21df, +/* 33c - _D9_C0 */ 0x21e0, +/* 
33d - _D9_C0 */ 0x21e1, +/* 33e - _D9_C0 */ 0x21e2, +/* 33f - _D9_C0 */ 0x21e3, +/* 340 - _D9_C8 */ 0x21e4, +/* 341 - _D9_C9 */ 0x21e5, +/* 342 - _D9_C8 */ 0x21e6, +/* 343 - _D9_C8 */ 0x21e7, +/* 344 - _D9_C8 */ 0x21e8, +/* 345 - _D9_C8 */ 0x21e9, +/* 346 - _D9_C8 */ 0x21ea, +/* 347 - _D9_C8 */ 0x21eb, +/* 348 - _D9_D0 */ 0x21ec, +/* 349 - */ 0, +/* 34a - */ 0, +/* 34b - */ 0, +/* 34c - */ 0, +/* 34d - */ 0, +/* 34e - */ 0, +/* 34f - */ 0, +/* 350 - */ 0, +/* 351 - */ 0, +/* 352 - */ 0, +/* 353 - */ 0, +/* 354 - */ 0, +/* 355 - */ 0, +/* 356 - */ 0, +/* 357 - */ 0, +/* 358 - _D9_E0 */ 0x21ed, +/* 359 - _D9_E1 */ 0x21ee, +/* 35a - */ 0, +/* 35b - */ 0, +/* 35c - _D9_E4 */ 0x21ef, +/* 35d - _D9_E5 */ 0x21f0, +/* 35e - */ 0, +/* 35f - */ 0, +/* 360 - _D9_E8 */ 0x21f1, +/* 361 - _D9_E9 */ 0x21f2, +/* 362 - _D9_EA */ 0x21f3, +/* 363 - _D9_EB */ 0x21f4, +/* 364 - _D9_EC */ 0x21f5, +/* 365 - _D9_ED */ 0x21f6, +/* 366 - _D9_EE */ 0x21f7, +/* 367 - */ 0, +/* 368 - _D9_F0 */ 0x21f8, +/* 369 - _D9_F1 */ 0x21f9, +/* 36a - _D9_F2 */ 0x21fa, +/* 36b - _D9_F3 */ 0x21fb, +/* 36c - _D9_F4 */ 0x21fc, +/* 36d - _D9_F5 */ 0x21fd, +/* 36e - _D9_F6 */ 0x21fe, +/* 36f - _D9_F7 */ 0x21ff, +/* 370 - _D9_F8 */ 0x2200, +/* 371 - _D9_F9 */ 0x2201, +/* 372 - _D9_FA */ 0x2202, +/* 373 - _D9_FB */ 0x2203, +/* 374 - _D9_FC */ 0x2204, +/* 375 - _D9_FD */ 0x2205, +/* 376 - _D9_FE */ 0x2206, +/* 377 - _D9_FF */ 0x2207, +/* 378 - _DA_00 */ 0x2208, +/* 379 - _DA_01 */ 0x2209, +/* 37a - _DA_02 */ 0x220a, +/* 37b - _DA_03 */ 0x220b, +/* 37c - _DA_04 */ 0x220c, +/* 37d - _DA_05 */ 0x220d, +/* 37e - _DA_06 */ 0x220e, +/* 37f - _DA_07 */ 0x220f, +/* 380 - _DA_C0 */ 0x2210, +/* 381 - _DA_C0 */ 0x2211, +/* 382 - _DA_C0 */ 0x2212, +/* 383 - _DA_C0 */ 0x2213, +/* 384 - _DA_C0 */ 0x2214, +/* 385 - _DA_C0 */ 0x2215, +/* 386 - _DA_C0 */ 0x2216, +/* 387 - _DA_C0 */ 0x2217, +/* 388 - _DA_C8 */ 0x2218, +/* 389 - _DA_C8 */ 0x2219, +/* 38a - _DA_C8 */ 0x221a, +/* 38b - _DA_C8 */ 0x221b, +/* 38c - _DA_C8 */ 0x221c, +/* 
38d - _DA_C8 */ 0x221d, +/* 38e - _DA_C8 */ 0x221e, +/* 38f - _DA_C8 */ 0x221f, +/* 390 - _DA_D0 */ 0x2220, +/* 391 - _DA_D0 */ 0x2221, +/* 392 - _DA_D0 */ 0x2222, +/* 393 - _DA_D0 */ 0x2223, +/* 394 - _DA_D0 */ 0x2224, +/* 395 - _DA_D0 */ 0x2225, +/* 396 - _DA_D0 */ 0x2226, +/* 397 - _DA_D0 */ 0x2227, +/* 398 - _DA_D8 */ 0x2228, +/* 399 - _DA_D8 */ 0x2229, +/* 39a - _DA_D8 */ 0x222a, +/* 39b - _DA_D8 */ 0x222b, +/* 39c - _DA_D8 */ 0x222c, +/* 39d - _DA_D8 */ 0x222d, +/* 39e - _DA_D8 */ 0x222e, +/* 39f - _DA_D8 */ 0x222f, +/* 3a0 - */ 0, +/* 3a1 - */ 0, +/* 3a2 - */ 0, +/* 3a3 - */ 0, +/* 3a4 - */ 0, +/* 3a5 - */ 0, +/* 3a6 - */ 0, +/* 3a7 - */ 0, +/* 3a8 - */ 0, +/* 3a9 - _DA_E9 */ 0x2230, +/* 3aa - */ 0, +/* 3ab - */ 0, +/* 3ac - */ 0, +/* 3ad - */ 0, +/* 3ae - */ 0, +/* 3af - */ 0, +/* 3b0 - */ 0, +/* 3b1 - */ 0, +/* 3b2 - */ 0, +/* 3b3 - */ 0, +/* 3b4 - */ 0, +/* 3b5 - */ 0, +/* 3b6 - */ 0, +/* 3b7 - */ 0, +/* 3b8 - */ 0, +/* 3b9 - */ 0, +/* 3ba - */ 0, +/* 3bb - */ 0, +/* 3bc - */ 0, +/* 3bd - */ 0, +/* 3be - */ 0, +/* 3bf - */ 0, +/* 3c0 - _DB_00 */ 0x2231, +/* 3c1 - _DB_01 */ 0x2232, +/* 3c2 - _DB_02 */ 0x2233, +/* 3c3 - _DB_03 */ 0x2234, +/* 3c4 - */ 0, +/* 3c5 - _DB_05 */ 0x2235, +/* 3c6 - */ 0, +/* 3c7 - _DB_07 */ 0x2236, +/* 3c8 - _DB_C0 */ 0x2237, +/* 3c9 - _DB_C0 */ 0x2238, +/* 3ca - _DB_C0 */ 0x2239, +/* 3cb - _DB_C0 */ 0x223a, +/* 3cc - _DB_C0 */ 0x223b, +/* 3cd - _DB_C0 */ 0x223c, +/* 3ce - _DB_C0 */ 0x223d, +/* 3cf - _DB_C0 */ 0x223e, +/* 3d0 - _DB_C8 */ 0x223f, +/* 3d1 - _DB_C8 */ 0x2240, +/* 3d2 - _DB_C8 */ 0x2241, +/* 3d3 - _DB_C8 */ 0x2242, +/* 3d4 - _DB_C8 */ 0x2243, +/* 3d5 - _DB_C8 */ 0x2244, +/* 3d6 - _DB_C8 */ 0x2245, +/* 3d7 - _DB_C8 */ 0x2246, +/* 3d8 - _DB_D0 */ 0x2247, +/* 3d9 - _DB_D0 */ 0x2248, +/* 3da - _DB_D0 */ 0x2249, +/* 3db - _DB_D0 */ 0x224a, +/* 3dc - _DB_D0 */ 0x224b, +/* 3dd - _DB_D0 */ 0x224c, +/* 3de - _DB_D0 */ 0x224d, +/* 3df - _DB_D0 */ 0x224e, +/* 3e0 - _DB_D8 */ 0x224f, +/* 3e1 - _DB_D8 */ 0x2250, +/* 3e2 - _DB_D8 */ 
0x2251, +/* 3e3 - _DB_D8 */ 0x2252, +/* 3e4 - _DB_D8 */ 0x2253, +/* 3e5 - _DB_D8 */ 0x2254, +/* 3e6 - _DB_D8 */ 0x2255, +/* 3e7 - _DB_D8 */ 0x2256, +/* 3e8 - _DB_E0 */ 0x2257, +/* 3e9 - _DB_E1 */ 0x2258, +/* 3ea - _DB_E2 */ 0xcf3c, +/* 3eb - _DB_E3 */ 0xcf48, +/* 3ec - _DB_E4 */ 0x2259, +/* 3ed - */ 0, +/* 3ee - */ 0, +/* 3ef - */ 0, +/* 3f0 - _DB_E8 */ 0x225a, +/* 3f1 - _DB_E8 */ 0x225b, +/* 3f2 - _DB_E8 */ 0x225c, +/* 3f3 - _DB_E8 */ 0x225d, +/* 3f4 - _DB_E8 */ 0x225e, +/* 3f5 - _DB_E8 */ 0x225f, +/* 3f6 - _DB_E8 */ 0x2260, +/* 3f7 - _DB_E8 */ 0x2261, +/* 3f8 - _DB_F0 */ 0x2262, +/* 3f9 - _DB_F0 */ 0x2263, +/* 3fa - _DB_F0 */ 0x2264, +/* 3fb - _DB_F0 */ 0x2265, +/* 3fc - _DB_F0 */ 0x2266, +/* 3fd - _DB_F0 */ 0x2267, +/* 3fe - _DB_F0 */ 0x2268, +/* 3ff - _DB_F0 */ 0x2269, +/* 400 - */ 0, +/* 401 - */ 0, +/* 402 - */ 0, +/* 403 - */ 0, +/* 404 - */ 0, +/* 405 - */ 0, +/* 406 - */ 0, +/* 407 - */ 0, +/* 408 - _DC_00 */ 0x226a, +/* 409 - _DC_01 */ 0x226b, +/* 40a - _DC_02 */ 0x226c, +/* 40b - _DC_03 */ 0x226d, +/* 40c - _DC_04 */ 0x226e, +/* 40d - _DC_05 */ 0x226f, +/* 40e - _DC_06 */ 0x2270, +/* 40f - _DC_07 */ 0x2271, +/* 410 - _DC_C0 */ 0x2272, +/* 411 - _DC_C0 */ 0x2273, +/* 412 - _DC_C0 */ 0x2274, +/* 413 - _DC_C0 */ 0x2275, +/* 414 - _DC_C0 */ 0x2276, +/* 415 - _DC_C0 */ 0x2277, +/* 416 - _DC_C0 */ 0x2278, +/* 417 - _DC_C0 */ 0x2279, +/* 418 - _DC_C8 */ 0x227a, +/* 419 - _DC_C8 */ 0x227b, +/* 41a - _DC_C8 */ 0x227c, +/* 41b - _DC_C8 */ 0x227d, +/* 41c - _DC_C8 */ 0x227e, +/* 41d - _DC_C8 */ 0x227f, +/* 41e - _DC_C8 */ 0x2280, +/* 41f - _DC_C8 */ 0x2281, +/* 420 - */ 0, +/* 421 - */ 0, +/* 422 - */ 0, +/* 423 - */ 0, +/* 424 - */ 0, +/* 425 - */ 0, +/* 426 - */ 0, +/* 427 - */ 0, +/* 428 - */ 0, +/* 429 - */ 0, +/* 42a - */ 0, +/* 42b - */ 0, +/* 42c - */ 0, +/* 42d - */ 0, +/* 42e - */ 0, +/* 42f - */ 0, +/* 430 - _DC_E0 */ 0x2282, +/* 431 - _DC_E0 */ 0x2283, +/* 432 - _DC_E0 */ 0x2284, +/* 433 - _DC_E0 */ 0x2285, +/* 434 - _DC_E0 */ 0x2286, +/* 435 - _DC_E0 */ 
0x2287, +/* 436 - _DC_E0 */ 0x2288, +/* 437 - _DC_E0 */ 0x2289, +/* 438 - _DC_E8 */ 0x228a, +/* 439 - _DC_E8 */ 0x228b, +/* 43a - _DC_E8 */ 0x228c, +/* 43b - _DC_E8 */ 0x228d, +/* 43c - _DC_E8 */ 0x228e, +/* 43d - _DC_E8 */ 0x228f, +/* 43e - _DC_E8 */ 0x2290, +/* 43f - _DC_E8 */ 0x2291, +/* 440 - _DC_F0 */ 0x2292, +/* 441 - _DC_F0 */ 0x2293, +/* 442 - _DC_F0 */ 0x2294, +/* 443 - _DC_F0 */ 0x2295, +/* 444 - _DC_F0 */ 0x2296, +/* 445 - _DC_F0 */ 0x2297, +/* 446 - _DC_F0 */ 0x2298, +/* 447 - _DC_F0 */ 0x2299, +/* 448 - _DC_F8 */ 0x229a, +/* 449 - _DC_F8 */ 0x229b, +/* 44a - _DC_F8 */ 0x229c, +/* 44b - _DC_F8 */ 0x229d, +/* 44c - _DC_F8 */ 0x229e, +/* 44d - _DC_F8 */ 0x229f, +/* 44e - _DC_F8 */ 0x22a0, +/* 44f - _DC_F8 */ 0x22a1, +/* 450 - _DD_00 */ 0x22a2, +/* 451 - _DD_01 */ 0x22a3, +/* 452 - _DD_02 */ 0x22a4, +/* 453 - _DD_03 */ 0x22a5, +/* 454 - _DD_04 */ 0x22a6, +/* 455 - */ 0, +/* 456 - _DD_06 */ 0xcf54, +/* 457 - _DD_07 */ 0xcf60, +/* 458 - _DD_C0 */ 0x22a7, +/* 459 - _DD_C0 */ 0x22a8, +/* 45a - _DD_C0 */ 0x22a9, +/* 45b - _DD_C0 */ 0x22aa, +/* 45c - _DD_C0 */ 0x22ab, +/* 45d - _DD_C0 */ 0x22ac, +/* 45e - _DD_C0 */ 0x22ad, +/* 45f - _DD_C0 */ 0x22ae, +/* 460 - */ 0, +/* 461 - */ 0, +/* 462 - */ 0, +/* 463 - */ 0, +/* 464 - */ 0, +/* 465 - */ 0, +/* 466 - */ 0, +/* 467 - */ 0, +/* 468 - _DD_D0 */ 0x22af, +/* 469 - _DD_D0 */ 0x22b0, +/* 46a - _DD_D0 */ 0x22b1, +/* 46b - _DD_D0 */ 0x22b2, +/* 46c - _DD_D0 */ 0x22b3, +/* 46d - _DD_D0 */ 0x22b4, +/* 46e - _DD_D0 */ 0x22b5, +/* 46f - _DD_D0 */ 0x22b6, +/* 470 - _DD_D8 */ 0x22b7, +/* 471 - _DD_D8 */ 0x22b8, +/* 472 - _DD_D8 */ 0x22b9, +/* 473 - _DD_D8 */ 0x22ba, +/* 474 - _DD_D8 */ 0x22bb, +/* 475 - _DD_D8 */ 0x22bc, +/* 476 - _DD_D8 */ 0x22bd, +/* 477 - _DD_D8 */ 0x22be, +/* 478 - _DD_E0 */ 0x22bf, +/* 479 - _DD_E1 */ 0x22c0, +/* 47a - _DD_E0 */ 0x22c1, +/* 47b - _DD_E0 */ 0x22c2, +/* 47c - _DD_E0 */ 0x22c3, +/* 47d - _DD_E0 */ 0x22c4, +/* 47e - _DD_E0 */ 0x22c5, +/* 47f - _DD_E0 */ 0x22c6, +/* 480 - _DD_E8 */ 0x22c7, 
+/* 481 - _DD_E9 */ 0x22c8, +/* 482 - _DD_E8 */ 0x22c9, +/* 483 - _DD_E8 */ 0x22ca, +/* 484 - _DD_E8 */ 0x22cb, +/* 485 - _DD_E8 */ 0x22cc, +/* 486 - _DD_E8 */ 0x22cd, +/* 487 - _DD_E8 */ 0x22ce, +/* 488 - */ 0, +/* 489 - */ 0, +/* 48a - */ 0, +/* 48b - */ 0, +/* 48c - */ 0, +/* 48d - */ 0, +/* 48e - */ 0, +/* 48f - */ 0, +/* 490 - */ 0, +/* 491 - */ 0, +/* 492 - */ 0, +/* 493 - */ 0, +/* 494 - */ 0, +/* 495 - */ 0, +/* 496 - */ 0, +/* 497 - */ 0, +/* 498 - _DE_00 */ 0x22cf, +/* 499 - _DE_01 */ 0x22d0, +/* 49a - _DE_02 */ 0x22d1, +/* 49b - _DE_03 */ 0x22d2, +/* 49c - _DE_04 */ 0x22d3, +/* 49d - _DE_05 */ 0x22d4, +/* 49e - _DE_06 */ 0x22d5, +/* 49f - _DE_07 */ 0x22d6, +/* 4a0 - _DE_C0 */ 0x22d7, +/* 4a1 - _DE_C1 */ 0x22d8, +/* 4a2 - _DE_C0 */ 0x22d9, +/* 4a3 - _DE_C0 */ 0x22da, +/* 4a4 - _DE_C0 */ 0x22db, +/* 4a5 - _DE_C0 */ 0x22dc, +/* 4a6 - _DE_C0 */ 0x22dd, +/* 4a7 - _DE_C0 */ 0x22de, +/* 4a8 - _DE_C8 */ 0x22df, +/* 4a9 - _DE_C9 */ 0x22e0, +/* 4aa - _DE_C8 */ 0x22e1, +/* 4ab - _DE_C8 */ 0x22e2, +/* 4ac - _DE_C8 */ 0x22e3, +/* 4ad - _DE_C8 */ 0x22e4, +/* 4ae - _DE_C8 */ 0x22e5, +/* 4af - _DE_C8 */ 0x22e6, +/* 4b0 - */ 0, +/* 4b1 - */ 0, +/* 4b2 - */ 0, +/* 4b3 - */ 0, +/* 4b4 - */ 0, +/* 4b5 - */ 0, +/* 4b6 - */ 0, +/* 4b7 - */ 0, +/* 4b8 - */ 0, +/* 4b9 - _DE_D9 */ 0x22e7, +/* 4ba - */ 0, +/* 4bb - */ 0, +/* 4bc - */ 0, +/* 4bd - */ 0, +/* 4be - */ 0, +/* 4bf - */ 0, +/* 4c0 - _DE_E0 */ 0x22e8, +/* 4c1 - _DE_E1 */ 0x22e9, +/* 4c2 - _DE_E0 */ 0x22ea, +/* 4c3 - _DE_E0 */ 0x22eb, +/* 4c4 - _DE_E0 */ 0x22ec, +/* 4c5 - _DE_E0 */ 0x22ed, +/* 4c6 - _DE_E0 */ 0x22ee, +/* 4c7 - _DE_E0 */ 0x22ef, +/* 4c8 - _DE_E8 */ 0x22f0, +/* 4c9 - _DE_E9 */ 0x22f1, +/* 4ca - _DE_E8 */ 0x22f2, +/* 4cb - _DE_E8 */ 0x22f3, +/* 4cc - _DE_E8 */ 0x22f4, +/* 4cd - _DE_E8 */ 0x22f5, +/* 4ce - _DE_E8 */ 0x22f6, +/* 4cf - _DE_E8 */ 0x22f7, +/* 4d0 - _DE_F0 */ 0x22f8, +/* 4d1 - _DE_F1 */ 0x22f9, +/* 4d2 - _DE_F0 */ 0x22fa, +/* 4d3 - _DE_F0 */ 0x22fb, +/* 4d4 - _DE_F0 */ 0x22fc, +/* 4d5 - _DE_F0 */ 
0x22fd, +/* 4d6 - _DE_F0 */ 0x22fe, +/* 4d7 - _DE_F0 */ 0x22ff, +/* 4d8 - _DE_F8 */ 0x2300, +/* 4d9 - _DE_F9 */ 0x2301, +/* 4da - _DE_F8 */ 0x2302, +/* 4db - _DE_F8 */ 0x2303, +/* 4dc - _DE_F8 */ 0x2304, +/* 4dd - _DE_F8 */ 0x2305, +/* 4de - _DE_F8 */ 0x2306, +/* 4df - _DE_F8 */ 0x2307, +/* 4e0 - _DF_00 */ 0x2308, +/* 4e1 - _DF_01 */ 0x2309, +/* 4e2 - _DF_02 */ 0x230a, +/* 4e3 - _DF_03 */ 0x230b, +/* 4e4 - _DF_04 */ 0x230c, +/* 4e5 - _DF_05 */ 0x230d, +/* 4e6 - _DF_06 */ 0x230e, +/* 4e7 - _DF_07 */ 0x230f, +/* 4e8 - */ 0, +/* 4e9 - */ 0, +/* 4ea - */ 0, +/* 4eb - */ 0, +/* 4ec - */ 0, +/* 4ed - */ 0, +/* 4ee - */ 0, +/* 4ef - */ 0, +/* 4f0 - */ 0, +/* 4f1 - */ 0, +/* 4f2 - */ 0, +/* 4f3 - */ 0, +/* 4f4 - */ 0, +/* 4f5 - */ 0, +/* 4f6 - */ 0, +/* 4f7 - */ 0, +/* 4f8 - */ 0, +/* 4f9 - */ 0, +/* 4fa - */ 0, +/* 4fb - */ 0, +/* 4fc - */ 0, +/* 4fd - */ 0, +/* 4fe - */ 0, +/* 4ff - */ 0, +/* 500 - */ 0, +/* 501 - */ 0, +/* 502 - */ 0, +/* 503 - */ 0, +/* 504 - */ 0, +/* 505 - */ 0, +/* 506 - */ 0, +/* 507 - */ 0, +/* 508 - _DF_E0 */ 0xcf6c, +/* 509 - */ 0, +/* 50a - */ 0, +/* 50b - */ 0, +/* 50c - */ 0, +/* 50d - */ 0, +/* 50e - */ 0, +/* 50f - */ 0, +/* 510 - _DF_E8 */ 0x2310, +/* 511 - _DF_E8 */ 0x2311, +/* 512 - _DF_E8 */ 0x2312, +/* 513 - _DF_E8 */ 0x2313, +/* 514 - _DF_E8 */ 0x2314, +/* 515 - _DF_E8 */ 0x2315, +/* 516 - _DF_E8 */ 0x2316, +/* 517 - _DF_E8 */ 0x2317, +/* 518 - _DF_F0 */ 0x2318, +/* 519 - _DF_F0 */ 0x2319, +/* 51a - _DF_F0 */ 0x231a, +/* 51b - _DF_F0 */ 0x231b, +/* 51c - _DF_F0 */ 0x231c, +/* 51d - _DF_F0 */ 0x231d, +/* 51e - _DF_F0 */ 0x231e, +/* 51f - _DF_F0 */ 0x231f, +/* 520 - */ 0, +/* 521 - */ 0, +/* 522 - */ 0, +/* 523 - */ 0, +/* 524 - */ 0, +/* 525 - */ 0, +/* 526 - */ 0, +/* 527 - */ 0, +/* 528 - _F6_00 */ 0x2320, +/* 529 - */ 0, +/* 52a - _F6_02 */ 0x2321, +/* 52b - _F6_03 */ 0x2322, +/* 52c - _F6_04 */ 0x2323, +/* 52d - _F6_05 */ 0x2324, +/* 52e - _F6_06 */ 0x2325, +/* 52f - _F6_07 */ 0x2326, +/* 530 - _F7_00 */ 0x2327, +/* 531 - */ 0, +/* 
532 - _F7_02 */ 0x2328, +/* 533 - _F7_03 */ 0x2329, +/* 534 - _F7_04 */ 0x232a, +/* 535 - _F7_05 */ 0x232b, +/* 536 - _F7_06 */ 0x232c, +/* 537 - _F7_07 */ 0x232d, +/* 538 - _FE_00 */ 0x232e, +/* 539 - _FE_01 */ 0x232f, +/* 53a - */ 0, +/* 53b - */ 0, +/* 53c - */ 0, +/* 53d - */ 0, +/* 53e - */ 0, +/* 53f - */ 0, +/* 540 - _FF_00 */ 0x2330, +/* 541 - _FF_01 */ 0x2331, +/* 542 - _FF_02 */ 0x2332, +/* 543 - _FF_03 */ 0x2333, +/* 544 - _FF_04 */ 0x2334, +/* 545 - _FF_05 */ 0x2335, +/* 546 - _FF_06 */ 0x2336, +/* 547 - */ 0, +/* 548 - _0F_00_00 */ 0x2337, +/* 549 - _0F_00_01 */ 0x2338, +/* 54a - _0F_00_02 */ 0x2339, +/* 54b - _0F_00_03 */ 0x233a, +/* 54c - _0F_00_04 */ 0x233b, +/* 54d - _0F_00_05 */ 0x233c, +/* 54e - */ 0, +/* 54f - */ 0, +/* 550 - _0F_01_00 */ 0x233d, +/* 551 - _0F_01_01 */ 0x233e, +/* 552 - _0F_01_02 */ 0x233f, +/* 553 - _0F_01_03 */ 0x2340, +/* 554 - _0F_01_04 */ 0x2341, +/* 555 - */ 0, +/* 556 - _0F_01_06 */ 0x2342, +/* 557 - _0F_01_07 */ 0x2343, +/* 558 - */ 0, +/* 559 - _0F_01_C1 */ 0x2344, +/* 55a - _0F_01_C2 */ 0x2345, +/* 55b - _0F_01_C3 */ 0x2346, +/* 55c - _0F_01_C4 */ 0x2347, +/* 55d - */ 0, +/* 55e - */ 0, +/* 55f - */ 0, +/* 560 - _0F_01_C8 */ 0x2348, +/* 561 - _0F_01_C9 */ 0x2349, +/* 562 - */ 0, +/* 563 - */ 0, +/* 564 - */ 0, +/* 565 - */ 0, +/* 566 - */ 0, +/* 567 - */ 0, +/* 568 - _0F_01_D0 */ 0x234a, +/* 569 - _0F_01_D1 */ 0x234b, +/* 56a - */ 0, +/* 56b - */ 0, +/* 56c - _0F_01_D4 */ 0x234c, +/* 56d - _0F_01_D5 */ 0x234d, +/* 56e - */ 0, +/* 56f - */ 0, +/* 570 - _0F_01_D8 */ 0x234e, +/* 571 - _0F_01_D9 */ 0x234f, +/* 572 - _0F_01_DA */ 0x2350, +/* 573 - _0F_01_DB */ 0x2351, +/* 574 - _0F_01_DC */ 0x2352, +/* 575 - _0F_01_DD */ 0x2353, +/* 576 - _0F_01_DE */ 0x2354, +/* 577 - _0F_01_DF */ 0x2355, +/* 578 - */ 0, +/* 579 - */ 0, +/* 57a - */ 0, +/* 57b - */ 0, +/* 57c - */ 0, +/* 57d - */ 0, +/* 57e - */ 0, +/* 57f - */ 0, +/* 580 - */ 0, +/* 581 - */ 0, +/* 582 - */ 0, +/* 583 - */ 0, +/* 584 - */ 0, +/* 585 - */ 0, +/* 586 - */ 
0, +/* 587 - */ 0, +/* 588 - */ 0, +/* 589 - */ 0, +/* 58a - */ 0, +/* 58b - */ 0, +/* 58c - */ 0, +/* 58d - */ 0, +/* 58e - */ 0, +/* 58f - */ 0, +/* 590 - _0F_01_F8 */ 0x2356, +/* 591 - _0F_01_F9 */ 0x2357, +/* 592 - */ 0, +/* 593 - */ 0, +/* 594 - */ 0, +/* 595 - */ 0, +/* 596 - */ 0, +/* 597 - */ 0, +/* 598 - _0F_0D_00 */ 0x2358, +/* 599 - _0F_0D_01 */ 0x2359, +/* 59a - */ 0, +/* 59b - */ 0, +/* 59c - */ 0, +/* 59d - */ 0, +/* 59e - */ 0, +/* 59f - */ 0, +/* 5a0 - */ 0, +/* 5a1 - */ 0, +/* 5a2 - */ 0, +/* 5a3 - */ 0, +/* 5a4 - */ 0, +/* 5a5 - */ 0, +/* 5a6 - */ 0, +/* 5a7 - */ 0, +/* 5a8 - */ 0, +/* 5a9 - */ 0, +/* 5aa - */ 0, +/* 5ab - */ 0, +/* 5ac - _0F_0F_0C */ 0x235a, +/* 5ad - _0F_0F_0D */ 0x235b, +/* 5ae - */ 0, +/* 5af - */ 0, +/* 5b0 - */ 0, +/* 5b1 - */ 0, +/* 5b2 - */ 0, +/* 5b3 - */ 0, +/* 5b4 - */ 0, +/* 5b5 - */ 0, +/* 5b6 - */ 0, +/* 5b7 - */ 0, +/* 5b8 - */ 0, +/* 5b9 - */ 0, +/* 5ba - */ 0, +/* 5bb - */ 0, +/* 5bc - _0F_0F_1C */ 0x235c, +/* 5bd - _0F_0F_1D */ 0x235d, +/* 5be - */ 0, +/* 5bf - */ 0, +/* 5c0 - */ 0, +/* 5c1 - */ 0, +/* 5c2 - */ 0, +/* 5c3 - */ 0, +/* 5c4 - */ 0, +/* 5c5 - */ 0, +/* 5c6 - */ 0, +/* 5c7 - */ 0, +/* 5c8 - */ 0, +/* 5c9 - */ 0, +/* 5ca - */ 0, +/* 5cb - */ 0, +/* 5cc - */ 0, +/* 5cd - */ 0, +/* 5ce - */ 0, +/* 5cf - */ 0, +/* 5d0 - */ 0, +/* 5d1 - */ 0, +/* 5d2 - */ 0, +/* 5d3 - */ 0, +/* 5d4 - */ 0, +/* 5d5 - */ 0, +/* 5d6 - */ 0, +/* 5d7 - */ 0, +/* 5d8 - */ 0, +/* 5d9 - */ 0, +/* 5da - */ 0, +/* 5db - */ 0, +/* 5dc - */ 0, +/* 5dd - */ 0, +/* 5de - */ 0, +/* 5df - */ 0, +/* 5e0 - */ 0, +/* 5e1 - */ 0, +/* 5e2 - */ 0, +/* 5e3 - */ 0, +/* 5e4 - */ 0, +/* 5e5 - */ 0, +/* 5e6 - */ 0, +/* 5e7 - */ 0, +/* 5e8 - */ 0, +/* 5e9 - */ 0, +/* 5ea - */ 0, +/* 5eb - */ 0, +/* 5ec - */ 0, +/* 5ed - */ 0, +/* 5ee - */ 0, +/* 5ef - */ 0, +/* 5f0 - */ 0, +/* 5f1 - */ 0, +/* 5f2 - */ 0, +/* 5f3 - */ 0, +/* 5f4 - */ 0, +/* 5f5 - */ 0, +/* 5f6 - */ 0, +/* 5f7 - */ 0, +/* 5f8 - */ 0, +/* 5f9 - */ 0, +/* 5fa - */ 0, +/* 5fb - */ 0, +/* 
5fc - */ 0, +/* 5fd - */ 0, +/* 5fe - */ 0, +/* 5ff - */ 0, +/* 600 - */ 0, +/* 601 - */ 0, +/* 602 - */ 0, +/* 603 - */ 0, +/* 604 - */ 0, +/* 605 - */ 0, +/* 606 - */ 0, +/* 607 - */ 0, +/* 608 - */ 0, +/* 609 - */ 0, +/* 60a - */ 0, +/* 60b - */ 0, +/* 60c - */ 0, +/* 60d - */ 0, +/* 60e - */ 0, +/* 60f - */ 0, +/* 610 - */ 0, +/* 611 - */ 0, +/* 612 - */ 0, +/* 613 - */ 0, +/* 614 - */ 0, +/* 615 - */ 0, +/* 616 - */ 0, +/* 617 - */ 0, +/* 618 - */ 0, +/* 619 - */ 0, +/* 61a - */ 0, +/* 61b - */ 0, +/* 61c - */ 0, +/* 61d - */ 0, +/* 61e - */ 0, +/* 61f - */ 0, +/* 620 - */ 0, +/* 621 - */ 0, +/* 622 - */ 0, +/* 623 - */ 0, +/* 624 - */ 0, +/* 625 - */ 0, +/* 626 - */ 0, +/* 627 - */ 0, +/* 628 - */ 0, +/* 629 - */ 0, +/* 62a - _0F_0F_8A */ 0x235e, +/* 62b - */ 0, +/* 62c - */ 0, +/* 62d - */ 0, +/* 62e - _0F_0F_8E */ 0x235f, +/* 62f - */ 0, +/* 630 - _0F_0F_90 */ 0x2360, +/* 631 - */ 0, +/* 632 - */ 0, +/* 633 - */ 0, +/* 634 - _0F_0F_94 */ 0x2361, +/* 635 - */ 0, +/* 636 - _0F_0F_96 */ 0x2362, +/* 637 - _0F_0F_97 */ 0x2363, +/* 638 - */ 0, +/* 639 - */ 0, +/* 63a - _0F_0F_9A */ 0x2364, +/* 63b - */ 0, +/* 63c - */ 0, +/* 63d - */ 0, +/* 63e - _0F_0F_9E */ 0x2365, +/* 63f - */ 0, +/* 640 - _0F_0F_A0 */ 0x2366, +/* 641 - */ 0, +/* 642 - */ 0, +/* 643 - */ 0, +/* 644 - _0F_0F_A4 */ 0x2367, +/* 645 - */ 0, +/* 646 - _0F_0F_A6 */ 0x2368, +/* 647 - _0F_0F_A7 */ 0x2369, +/* 648 - */ 0, +/* 649 - */ 0, +/* 64a - _0F_0F_AA */ 0x236a, +/* 64b - */ 0, +/* 64c - */ 0, +/* 64d - */ 0, +/* 64e - _0F_0F_AE */ 0x236b, +/* 64f - */ 0, +/* 650 - _0F_0F_B0 */ 0x236c, +/* 651 - */ 0, +/* 652 - */ 0, +/* 653 - */ 0, +/* 654 - _0F_0F_B4 */ 0x236d, +/* 655 - */ 0, +/* 656 - _0F_0F_B6 */ 0x236e, +/* 657 - _0F_0F_B7 */ 0x236f, +/* 658 - */ 0, +/* 659 - */ 0, +/* 65a - */ 0, +/* 65b - _0F_0F_BB */ 0x2370, +/* 65c - */ 0, +/* 65d - */ 0, +/* 65e - */ 0, +/* 65f - _0F_0F_BF */ 0x2371, +/* 660 - */ 0, +/* 661 - */ 0, +/* 662 - */ 0, +/* 663 - */ 0, +/* 664 - */ 0, +/* 665 - */ 0, +/* 666 
- */ 0, +/* 667 - */ 0, +/* 668 - */ 0, +/* 669 - */ 0, +/* 66a - */ 0, +/* 66b - */ 0, +/* 66c - */ 0, +/* 66d - */ 0, +/* 66e - */ 0, +/* 66f - */ 0, +/* 670 - */ 0, +/* 671 - */ 0, +/* 672 - */ 0, +/* 673 - */ 0, +/* 674 - */ 0, +/* 675 - */ 0, +/* 676 - */ 0, +/* 677 - */ 0, +/* 678 - */ 0, +/* 679 - */ 0, +/* 67a - */ 0, +/* 67b - */ 0, +/* 67c - */ 0, +/* 67d - */ 0, +/* 67e - */ 0, +/* 67f - */ 0, +/* 680 - */ 0, +/* 681 - */ 0, +/* 682 - */ 0, +/* 683 - */ 0, +/* 684 - */ 0, +/* 685 - */ 0, +/* 686 - */ 0, +/* 687 - */ 0, +/* 688 - */ 0, +/* 689 - */ 0, +/* 68a - */ 0, +/* 68b - */ 0, +/* 68c - */ 0, +/* 68d - */ 0, +/* 68e - */ 0, +/* 68f - */ 0, +/* 690 - */ 0, +/* 691 - */ 0, +/* 692 - */ 0, +/* 693 - */ 0, +/* 694 - */ 0, +/* 695 - */ 0, +/* 696 - */ 0, +/* 697 - */ 0, +/* 698 - */ 0, +/* 699 - */ 0, +/* 69a - */ 0, +/* 69b - */ 0, +/* 69c - */ 0, +/* 69d - */ 0, +/* 69e - */ 0, +/* 69f - */ 0, +/* 6a0 - _0F_10 */ 0x2372, +/* 6a1 - _66_0F_10 */ 0x2373, +/* 6a2 - _F3_0F_10 */ 0x2374, +/* 6a3 - _F2_0F_10 */ 0x2375, +/* 6a4 - _V_0F_10 */ 0x4009, +/* 6a5 - _V_66_0F_10 */ 0x400a, +/* 6a6 - _V_F3_0F_10 */ 0x400b, +/* 6a7 - _V_F2_0F_10 */ 0x400c, +/* 6a8 - */ 0, +/* 6a9 - */ 0, +/* 6aa - _VRR_F3_0F_10 */ 0x400d, +/* 6ab - _VRR_F2_0F_10 */ 0x400e, +/* 6ac - _0F_11 */ 0x2376, +/* 6ad - _66_0F_11 */ 0x2377, +/* 6ae - _F3_0F_11 */ 0x2378, +/* 6af - _F2_0F_11 */ 0x2379, +/* 6b0 - _V_0F_11 */ 0x400f, +/* 6b1 - _V_66_0F_11 */ 0x4010, +/* 6b2 - _V_F3_0F_11 */ 0x4011, +/* 6b3 - _V_F2_0F_11 */ 0x4012, +/* 6b4 - */ 0, +/* 6b5 - */ 0, +/* 6b6 - _VRR_F3_0F_11 */ 0x4013, +/* 6b7 - _VRR_F2_0F_11 */ 0x4014, +/* 6b8 - _0F_12 */ 0x4015, +/* 6b9 - _66_0F_12 */ 0x237a, +/* 6ba - _F3_0F_12 */ 0x237b, +/* 6bb - _F2_0F_12 */ 0x237c, +/* 6bc - _V_0F_12 */ 0x4016, +/* 6bd - _V_66_0F_12 */ 0x4017, +/* 6be - _V_F3_0F_12 */ 0x4018, +/* 6bf - _V_F2_0F_12 */ 0x4019, +/* 6c0 - */ 0, +/* 6c1 - */ 0, +/* 6c2 - */ 0, +/* 6c3 - */ 0, +/* 6c4 - _0F_13 */ 0x237d, +/* 6c5 - _66_0F_13 */ 0x237e, 
+/* 6c6 - */ 0, +/* 6c7 - */ 0, +/* 6c8 - _V_0F_13 */ 0x401a, +/* 6c9 - _V_66_0F_13 */ 0x401b, +/* 6ca - */ 0, +/* 6cb - */ 0, +/* 6cc - */ 0, +/* 6cd - */ 0, +/* 6ce - */ 0, +/* 6cf - */ 0, +/* 6d0 - _0F_14 */ 0x237f, +/* 6d1 - _66_0F_14 */ 0x2380, +/* 6d2 - */ 0, +/* 6d3 - */ 0, +/* 6d4 - _V_0F_14 */ 0x401c, +/* 6d5 - _V_66_0F_14 */ 0x401d, +/* 6d6 - */ 0, +/* 6d7 - */ 0, +/* 6d8 - */ 0, +/* 6d9 - */ 0, +/* 6da - */ 0, +/* 6db - */ 0, +/* 6dc - _0F_15 */ 0x2381, +/* 6dd - _66_0F_15 */ 0x2382, +/* 6de - */ 0, +/* 6df - */ 0, +/* 6e0 - _V_0F_15 */ 0x401e, +/* 6e1 - _V_66_0F_15 */ 0x401f, +/* 6e2 - */ 0, +/* 6e3 - */ 0, +/* 6e4 - */ 0, +/* 6e5 - */ 0, +/* 6e6 - */ 0, +/* 6e7 - */ 0, +/* 6e8 - _0F_16 */ 0x4020, +/* 6e9 - _66_0F_16 */ 0x2383, +/* 6ea - _F3_0F_16 */ 0x2384, +/* 6eb - */ 0, +/* 6ec - _V_0F_16 */ 0x4021, +/* 6ed - _V_66_0F_16 */ 0x4022, +/* 6ee - _V_F3_0F_16 */ 0x4023, +/* 6ef - */ 0, +/* 6f0 - */ 0, +/* 6f1 - */ 0, +/* 6f2 - */ 0, +/* 6f3 - */ 0, +/* 6f4 - _0F_17 */ 0x2385, +/* 6f5 - _66_0F_17 */ 0x2386, +/* 6f6 - */ 0, +/* 6f7 - */ 0, +/* 6f8 - _V_0F_17 */ 0x4024, +/* 6f9 - _V_66_0F_17 */ 0x4025, +/* 6fa - */ 0, +/* 6fb - */ 0, +/* 6fc - */ 0, +/* 6fd - */ 0, +/* 6fe - */ 0, +/* 6ff - */ 0, +/* 700 - _0F_18_00 */ 0x2387, +/* 701 - _0F_18_01 */ 0x2388, +/* 702 - _0F_18_02 */ 0x2389, +/* 703 - _0F_18_03 */ 0x238a, +/* 704 - */ 0, +/* 705 - */ 0, +/* 706 - */ 0, +/* 707 - */ 0, +/* 708 - _0F_28 */ 0x238b, +/* 709 - _66_0F_28 */ 0x238c, +/* 70a - */ 0, +/* 70b - */ 0, +/* 70c - _V_0F_28 */ 0x4026, +/* 70d - _V_66_0F_28 */ 0x4027, +/* 70e - */ 0, +/* 70f - */ 0, +/* 710 - */ 0, +/* 711 - */ 0, +/* 712 - */ 0, +/* 713 - */ 0, +/* 714 - _0F_29 */ 0x238d, +/* 715 - _66_0F_29 */ 0x238e, +/* 716 - */ 0, +/* 717 - */ 0, +/* 718 - _V_0F_29 */ 0x4028, +/* 719 - _V_66_0F_29 */ 0x4029, +/* 71a - */ 0, +/* 71b - */ 0, +/* 71c - */ 0, +/* 71d - */ 0, +/* 71e - */ 0, +/* 71f - */ 0, +/* 720 - _0F_2A */ 0x238f, +/* 721 - _66_0F_2A */ 0x2390, +/* 722 - _F3_0F_2A */ 
0x2391, +/* 723 - _F2_0F_2A */ 0x2392, +/* 724 - */ 0, +/* 725 - */ 0, +/* 726 - _V_F3_0F_2A */ 0x402a, +/* 727 - _V_F2_0F_2A */ 0x402b, +/* 728 - */ 0, +/* 729 - */ 0, +/* 72a - */ 0, +/* 72b - */ 0, +/* 72c - _0F_2B */ 0x2393, +/* 72d - _66_0F_2B */ 0x2394, +/* 72e - _F3_0F_2B */ 0x2395, +/* 72f - _F2_0F_2B */ 0x2396, +/* 730 - _V_0F_2B */ 0x402c, +/* 731 - _V_66_0F_2B */ 0x402d, +/* 732 - */ 0, +/* 733 - */ 0, +/* 734 - */ 0, +/* 735 - */ 0, +/* 736 - */ 0, +/* 737 - */ 0, +/* 738 - _0F_2C */ 0x2397, +/* 739 - _66_0F_2C */ 0x2398, +/* 73a - _F3_0F_2C */ 0x2399, +/* 73b - _F2_0F_2C */ 0x239a, +/* 73c - */ 0, +/* 73d - */ 0, +/* 73e - _V_F3_0F_2C */ 0x402e, +/* 73f - _V_F2_0F_2C */ 0x402f, +/* 740 - */ 0, +/* 741 - */ 0, +/* 742 - */ 0, +/* 743 - */ 0, +/* 744 - _0F_2D */ 0x239b, +/* 745 - _66_0F_2D */ 0x239c, +/* 746 - _F3_0F_2D */ 0x239d, +/* 747 - _F2_0F_2D */ 0x239e, +/* 748 - */ 0, +/* 749 - */ 0, +/* 74a - _V_F3_0F_2D */ 0x4030, +/* 74b - _V_F2_0F_2D */ 0x4031, +/* 74c - */ 0, +/* 74d - */ 0, +/* 74e - */ 0, +/* 74f - */ 0, +/* 750 - _0F_2E */ 0x239f, +/* 751 - _66_0F_2E */ 0x23a0, +/* 752 - */ 0, +/* 753 - */ 0, +/* 754 - _V_0F_2E */ 0x4032, +/* 755 - _V_66_0F_2E */ 0x4033, +/* 756 - */ 0, +/* 757 - */ 0, +/* 758 - */ 0, +/* 759 - */ 0, +/* 75a - */ 0, +/* 75b - */ 0, +/* 75c - _0F_2F */ 0x23a1, +/* 75d - _66_0F_2F */ 0x23a2, +/* 75e - */ 0, +/* 75f - */ 0, +/* 760 - _V_0F_2F */ 0x4034, +/* 761 - _V_66_0F_2F */ 0x4035, +/* 762 - */ 0, +/* 763 - */ 0, +/* 764 - */ 0, +/* 765 - */ 0, +/* 766 - */ 0, +/* 767 - */ 0, +/* 768 - _0F_38_00 */ 0xcf78, +/* 769 - _0F_38_01 */ 0xcf84, +/* 76a - _0F_38_02 */ 0xcf90, +/* 76b - _0F_38_03 */ 0xcf9c, +/* 76c - _0F_38_04 */ 0xcfa8, +/* 76d - _0F_38_05 */ 0xcfb4, +/* 76e - _0F_38_06 */ 0xcfc0, +/* 76f - _0F_38_07 */ 0xcfcc, +/* 770 - _0F_38_08 */ 0xcfd8, +/* 771 - _0F_38_09 */ 0xcfe4, +/* 772 - _0F_38_0A */ 0xcff0, +/* 773 - _0F_38_0B */ 0xcffc, +/* 774 - _0F_38_0C */ 0xd008, +/* 775 - _0F_38_0D */ 0xd014, +/* 776 - 
_0F_38_0E */ 0xd020, +/* 777 - _0F_38_0F */ 0xd02c, +/* 778 - _0F_38_10 */ 0xd038, +/* 779 - */ 0, +/* 77a - */ 0, +/* 77b - */ 0, +/* 77c - _0F_38_14 */ 0xd044, +/* 77d - _0F_38_15 */ 0xd050, +/* 77e - */ 0, +/* 77f - _0F_38_17 */ 0xd05c, +/* 780 - _0F_38_18 */ 0xd068, +/* 781 - _0F_38_19 */ 0xd074, +/* 782 - _0F_38_1A */ 0xd080, +/* 783 - */ 0, +/* 784 - _0F_38_1C */ 0xd08c, +/* 785 - _0F_38_1D */ 0xd098, +/* 786 - _0F_38_1E */ 0xd0a4, +/* 787 - */ 0, +/* 788 - _0F_38_20 */ 0xd0b0, +/* 789 - _0F_38_21 */ 0xd0bc, +/* 78a - _0F_38_22 */ 0xd0c8, +/* 78b - _0F_38_23 */ 0xd0d4, +/* 78c - _0F_38_24 */ 0xd0e0, +/* 78d - _0F_38_25 */ 0xd0ec, +/* 78e - */ 0, +/* 78f - */ 0, +/* 790 - _0F_38_28 */ 0xd0f8, +/* 791 - _0F_38_29 */ 0xd104, +/* 792 - _0F_38_2A */ 0xd110, +/* 793 - _0F_38_2B */ 0xd11c, +/* 794 - _0F_38_2C */ 0xd128, +/* 795 - _0F_38_2D */ 0xd134, +/* 796 - _0F_38_2E */ 0xd140, +/* 797 - _0F_38_2F */ 0xd14c, +/* 798 - _0F_38_30 */ 0xd158, +/* 799 - _0F_38_31 */ 0xd164, +/* 79a - _0F_38_32 */ 0xd170, +/* 79b - _0F_38_33 */ 0xd17c, +/* 79c - _0F_38_34 */ 0xd188, +/* 79d - _0F_38_35 */ 0xd194, +/* 79e - */ 0, +/* 79f - _0F_38_37 */ 0xd1a0, +/* 7a0 - _0F_38_38 */ 0xd1ac, +/* 7a1 - _0F_38_39 */ 0xd1b8, +/* 7a2 - _0F_38_3A */ 0xd1c4, +/* 7a3 - _0F_38_3B */ 0xd1d0, +/* 7a4 - _0F_38_3C */ 0xd1dc, +/* 7a5 - _0F_38_3D */ 0xd1e8, +/* 7a6 - _0F_38_3E */ 0xd1f4, +/* 7a7 - _0F_38_3F */ 0xd200, +/* 7a8 - _0F_38_40 */ 0xd20c, +/* 7a9 - _0F_38_41 */ 0xd218, +/* 7aa - */ 0, +/* 7ab - */ 0, +/* 7ac - */ 0, +/* 7ad - */ 0, +/* 7ae - */ 0, +/* 7af - */ 0, +/* 7b0 - */ 0, +/* 7b1 - */ 0, +/* 7b2 - */ 0, +/* 7b3 - */ 0, +/* 7b4 - */ 0, +/* 7b5 - */ 0, +/* 7b6 - */ 0, +/* 7b7 - */ 0, +/* 7b8 - */ 0, +/* 7b9 - */ 0, +/* 7ba - */ 0, +/* 7bb - */ 0, +/* 7bc - */ 0, +/* 7bd - */ 0, +/* 7be - */ 0, +/* 7bf - */ 0, +/* 7c0 - */ 0, +/* 7c1 - */ 0, +/* 7c2 - */ 0, +/* 7c3 - */ 0, +/* 7c4 - */ 0, +/* 7c5 - */ 0, +/* 7c6 - */ 0, +/* 7c7 - */ 0, +/* 7c8 - */ 0, +/* 7c9 - */ 0, +/* 7ca - */ 0, +/* 
7cb - */ 0, +/* 7cc - */ 0, +/* 7cd - */ 0, +/* 7ce - */ 0, +/* 7cf - */ 0, +/* 7d0 - */ 0, +/* 7d1 - */ 0, +/* 7d2 - */ 0, +/* 7d3 - */ 0, +/* 7d4 - */ 0, +/* 7d5 - */ 0, +/* 7d6 - */ 0, +/* 7d7 - */ 0, +/* 7d8 - */ 0, +/* 7d9 - */ 0, +/* 7da - */ 0, +/* 7db - */ 0, +/* 7dc - */ 0, +/* 7dd - */ 0, +/* 7de - */ 0, +/* 7df - */ 0, +/* 7e0 - */ 0, +/* 7e1 - */ 0, +/* 7e2 - */ 0, +/* 7e3 - */ 0, +/* 7e4 - */ 0, +/* 7e5 - */ 0, +/* 7e6 - */ 0, +/* 7e7 - */ 0, +/* 7e8 - _0F_38_80 */ 0xd224, +/* 7e9 - _0F_38_81 */ 0xd230, +/* 7ea - _0F_38_82 */ 0xd23c, +/* 7eb - */ 0, +/* 7ec - */ 0, +/* 7ed - */ 0, +/* 7ee - */ 0, +/* 7ef - */ 0, +/* 7f0 - */ 0, +/* 7f1 - */ 0, +/* 7f2 - */ 0, +/* 7f3 - */ 0, +/* 7f4 - */ 0, +/* 7f5 - */ 0, +/* 7f6 - */ 0, +/* 7f7 - */ 0, +/* 7f8 - */ 0, +/* 7f9 - */ 0, +/* 7fa - */ 0, +/* 7fb - */ 0, +/* 7fc - */ 0, +/* 7fd - */ 0, +/* 7fe - _0F_38_96 */ 0xd248, +/* 7ff - _0F_38_97 */ 0xd254, +/* 800 - _0F_38_98 */ 0xd260, +/* 801 - _0F_38_99 */ 0xd26c, +/* 802 - _0F_38_9A */ 0xd278, +/* 803 - _0F_38_9B */ 0xd284, +/* 804 - _0F_38_9C */ 0xd290, +/* 805 - _0F_38_9D */ 0xd29c, +/* 806 - _0F_38_9E */ 0xd2a8, +/* 807 - _0F_38_9F */ 0xd2b4, +/* 808 - */ 0, +/* 809 - */ 0, +/* 80a - */ 0, +/* 80b - */ 0, +/* 80c - */ 0, +/* 80d - */ 0, +/* 80e - _0F_38_A6 */ 0xd2c0, +/* 80f - _0F_38_A7 */ 0xd2cc, +/* 810 - _0F_38_A8 */ 0xd2d8, +/* 811 - _0F_38_A9 */ 0xd2e4, +/* 812 - _0F_38_AA */ 0xd2f0, +/* 813 - _0F_38_AB */ 0xd2fc, +/* 814 - _0F_38_AC */ 0xd308, +/* 815 - _0F_38_AD */ 0xd314, +/* 816 - _0F_38_AE */ 0xd320, +/* 817 - _0F_38_AF */ 0xd32c, +/* 818 - */ 0, +/* 819 - */ 0, +/* 81a - */ 0, +/* 81b - */ 0, +/* 81c - */ 0, +/* 81d - */ 0, +/* 81e - _0F_38_B6 */ 0xd338, +/* 81f - _0F_38_B7 */ 0xd344, +/* 820 - _0F_38_B8 */ 0xd350, +/* 821 - _0F_38_B9 */ 0xd35c, +/* 822 - _0F_38_BA */ 0xd368, +/* 823 - _0F_38_BB */ 0xd374, +/* 824 - _0F_38_BC */ 0xd380, +/* 825 - _0F_38_BD */ 0xd38c, +/* 826 - _0F_38_BE */ 0xd398, +/* 827 - _0F_38_BF */ 0xd3a4, +/* 828 - */ 0, +/* 
829 - */ 0, +/* 82a - */ 0, +/* 82b - */ 0, +/* 82c - */ 0, +/* 82d - */ 0, +/* 82e - */ 0, +/* 82f - */ 0, +/* 830 - */ 0, +/* 831 - */ 0, +/* 832 - */ 0, +/* 833 - */ 0, +/* 834 - */ 0, +/* 835 - */ 0, +/* 836 - */ 0, +/* 837 - */ 0, +/* 838 - */ 0, +/* 839 - */ 0, +/* 83a - */ 0, +/* 83b - */ 0, +/* 83c - */ 0, +/* 83d - */ 0, +/* 83e - */ 0, +/* 83f - */ 0, +/* 840 - */ 0, +/* 841 - */ 0, +/* 842 - */ 0, +/* 843 - _0F_38_DB */ 0xd3b0, +/* 844 - _0F_38_DC */ 0xd3bc, +/* 845 - _0F_38_DD */ 0xd3c8, +/* 846 - _0F_38_DE */ 0xd3d4, +/* 847 - _0F_38_DF */ 0xd3e0, +/* 848 - */ 0, +/* 849 - */ 0, +/* 84a - */ 0, +/* 84b - */ 0, +/* 84c - */ 0, +/* 84d - */ 0, +/* 84e - */ 0, +/* 84f - */ 0, +/* 850 - */ 0, +/* 851 - */ 0, +/* 852 - */ 0, +/* 853 - */ 0, +/* 854 - */ 0, +/* 855 - */ 0, +/* 856 - */ 0, +/* 857 - */ 0, +/* 858 - _0F_38_F0 */ 0xd3ec, +/* 859 - _0F_38_F1 */ 0xd3f8, +/* 85a - */ 0, +/* 85b - */ 0, +/* 85c - */ 0, +/* 85d - */ 0, +/* 85e - */ 0, +/* 85f - */ 0, +/* 860 - */ 0, +/* 861 - */ 0, +/* 862 - */ 0, +/* 863 - */ 0, +/* 864 - */ 0, +/* 865 - */ 0, +/* 866 - */ 0, +/* 867 - */ 0, +/* 868 - */ 0, +/* 869 - */ 0, +/* 86a - */ 0, +/* 86b - */ 0, +/* 86c - _0F_3A_04 */ 0xd404, +/* 86d - _0F_3A_05 */ 0xd410, +/* 86e - _0F_3A_06 */ 0xd41c, +/* 86f - */ 0, +/* 870 - _0F_3A_08 */ 0xd428, +/* 871 - _0F_3A_09 */ 0xd434, +/* 872 - _0F_3A_0A */ 0xd440, +/* 873 - _0F_3A_0B */ 0xd44c, +/* 874 - _0F_3A_0C */ 0xd458, +/* 875 - _0F_3A_0D */ 0xd464, +/* 876 - _0F_3A_0E */ 0xd470, +/* 877 - _0F_3A_0F */ 0xd47c, +/* 878 - */ 0, +/* 879 - */ 0, +/* 87a - */ 0, +/* 87b - */ 0, +/* 87c - _0F_3A_14 */ 0xd488, +/* 87d - _0F_3A_15 */ 0xd494, +/* 87e - _0F_3A_16 */ 0xd4a0, +/* 87f - _0F_3A_17 */ 0xd4ac, +/* 880 - _0F_3A_18 */ 0xd4b8, +/* 881 - _0F_3A_19 */ 0xd4c4, +/* 882 - */ 0, +/* 883 - */ 0, +/* 884 - */ 0, +/* 885 - */ 0, +/* 886 - */ 0, +/* 887 - */ 0, +/* 888 - _0F_3A_20 */ 0xd4d0, +/* 889 - _0F_3A_21 */ 0xd4dc, +/* 88a - _0F_3A_22 */ 0xd4e8, +/* 88b - */ 0, +/* 88c - */ 
0, +/* 88d - */ 0, +/* 88e - */ 0, +/* 88f - */ 0, +/* 890 - */ 0, +/* 891 - */ 0, +/* 892 - */ 0, +/* 893 - */ 0, +/* 894 - */ 0, +/* 895 - */ 0, +/* 896 - */ 0, +/* 897 - */ 0, +/* 898 - */ 0, +/* 899 - */ 0, +/* 89a - */ 0, +/* 89b - */ 0, +/* 89c - */ 0, +/* 89d - */ 0, +/* 89e - */ 0, +/* 89f - */ 0, +/* 8a0 - */ 0, +/* 8a1 - */ 0, +/* 8a2 - */ 0, +/* 8a3 - */ 0, +/* 8a4 - */ 0, +/* 8a5 - */ 0, +/* 8a6 - */ 0, +/* 8a7 - */ 0, +/* 8a8 - _0F_3A_40 */ 0xd4f4, +/* 8a9 - _0F_3A_41 */ 0xd500, +/* 8aa - _0F_3A_42 */ 0xd50c, +/* 8ab - */ 0, +/* 8ac - _0F_3A_44 */ 0xd518, +/* 8ad - */ 0, +/* 8ae - */ 0, +/* 8af - */ 0, +/* 8b0 - */ 0, +/* 8b1 - */ 0, +/* 8b2 - _0F_3A_4A */ 0xd524, +/* 8b3 - _0F_3A_4B */ 0xd530, +/* 8b4 - _0F_3A_4C */ 0xd53c, +/* 8b5 - */ 0, +/* 8b6 - */ 0, +/* 8b7 - */ 0, +/* 8b8 - */ 0, +/* 8b9 - */ 0, +/* 8ba - */ 0, +/* 8bb - */ 0, +/* 8bc - */ 0, +/* 8bd - */ 0, +/* 8be - */ 0, +/* 8bf - */ 0, +/* 8c0 - */ 0, +/* 8c1 - */ 0, +/* 8c2 - */ 0, +/* 8c3 - */ 0, +/* 8c4 - */ 0, +/* 8c5 - */ 0, +/* 8c6 - */ 0, +/* 8c7 - */ 0, +/* 8c8 - _0F_3A_60 */ 0xd548, +/* 8c9 - _0F_3A_61 */ 0xd554, +/* 8ca - _0F_3A_62 */ 0xd560, +/* 8cb - _0F_3A_63 */ 0xd56c, +/* 8cc - */ 0, +/* 8cd - */ 0, +/* 8ce - */ 0, +/* 8cf - */ 0, +/* 8d0 - */ 0, +/* 8d1 - */ 0, +/* 8d2 - */ 0, +/* 8d3 - */ 0, +/* 8d4 - */ 0, +/* 8d5 - */ 0, +/* 8d6 - */ 0, +/* 8d7 - */ 0, +/* 8d8 - */ 0, +/* 8d9 - */ 0, +/* 8da - */ 0, +/* 8db - */ 0, +/* 8dc - */ 0, +/* 8dd - */ 0, +/* 8de - */ 0, +/* 8df - */ 0, +/* 8e0 - */ 0, +/* 8e1 - */ 0, +/* 8e2 - */ 0, +/* 8e3 - */ 0, +/* 8e4 - */ 0, +/* 8e5 - */ 0, +/* 8e6 - */ 0, +/* 8e7 - */ 0, +/* 8e8 - */ 0, +/* 8e9 - */ 0, +/* 8ea - */ 0, +/* 8eb - */ 0, +/* 8ec - */ 0, +/* 8ed - */ 0, +/* 8ee - */ 0, +/* 8ef - */ 0, +/* 8f0 - */ 0, +/* 8f1 - */ 0, +/* 8f2 - */ 0, +/* 8f3 - */ 0, +/* 8f4 - */ 0, +/* 8f5 - */ 0, +/* 8f6 - */ 0, +/* 8f7 - */ 0, +/* 8f8 - */ 0, +/* 8f9 - */ 0, +/* 8fa - */ 0, +/* 8fb - */ 0, +/* 8fc - */ 0, +/* 8fd - */ 0, +/* 8fe - */ 0, +/* 8ff 
- */ 0, +/* 900 - */ 0, +/* 901 - */ 0, +/* 902 - */ 0, +/* 903 - */ 0, +/* 904 - */ 0, +/* 905 - */ 0, +/* 906 - */ 0, +/* 907 - */ 0, +/* 908 - */ 0, +/* 909 - */ 0, +/* 90a - */ 0, +/* 90b - */ 0, +/* 90c - */ 0, +/* 90d - */ 0, +/* 90e - */ 0, +/* 90f - */ 0, +/* 910 - */ 0, +/* 911 - */ 0, +/* 912 - */ 0, +/* 913 - */ 0, +/* 914 - */ 0, +/* 915 - */ 0, +/* 916 - */ 0, +/* 917 - */ 0, +/* 918 - */ 0, +/* 919 - */ 0, +/* 91a - */ 0, +/* 91b - */ 0, +/* 91c - */ 0, +/* 91d - */ 0, +/* 91e - */ 0, +/* 91f - */ 0, +/* 920 - */ 0, +/* 921 - */ 0, +/* 922 - */ 0, +/* 923 - */ 0, +/* 924 - */ 0, +/* 925 - */ 0, +/* 926 - */ 0, +/* 927 - */ 0, +/* 928 - */ 0, +/* 929 - */ 0, +/* 92a - */ 0, +/* 92b - */ 0, +/* 92c - */ 0, +/* 92d - */ 0, +/* 92e - */ 0, +/* 92f - */ 0, +/* 930 - */ 0, +/* 931 - */ 0, +/* 932 - */ 0, +/* 933 - */ 0, +/* 934 - */ 0, +/* 935 - */ 0, +/* 936 - */ 0, +/* 937 - */ 0, +/* 938 - */ 0, +/* 939 - */ 0, +/* 93a - */ 0, +/* 93b - */ 0, +/* 93c - */ 0, +/* 93d - */ 0, +/* 93e - */ 0, +/* 93f - */ 0, +/* 940 - */ 0, +/* 941 - */ 0, +/* 942 - */ 0, +/* 943 - */ 0, +/* 944 - */ 0, +/* 945 - */ 0, +/* 946 - */ 0, +/* 947 - _0F_3A_DF */ 0xd578, +/* 948 - */ 0, +/* 949 - */ 0, +/* 94a - */ 0, +/* 94b - */ 0, +/* 94c - */ 0, +/* 94d - */ 0, +/* 94e - */ 0, +/* 94f - */ 0, +/* 950 - */ 0, +/* 951 - */ 0, +/* 952 - */ 0, +/* 953 - */ 0, +/* 954 - */ 0, +/* 955 - */ 0, +/* 956 - */ 0, +/* 957 - */ 0, +/* 958 - */ 0, +/* 959 - */ 0, +/* 95a - */ 0, +/* 95b - */ 0, +/* 95c - */ 0, +/* 95d - */ 0, +/* 95e - */ 0, +/* 95f - */ 0, +/* 960 - */ 0, +/* 961 - */ 0, +/* 962 - */ 0, +/* 963 - */ 0, +/* 964 - */ 0, +/* 965 - */ 0, +/* 966 - */ 0, +/* 967 - */ 0, +/* 968 - _0F_50 */ 0x23a3, +/* 969 - _66_0F_50 */ 0x23a4, +/* 96a - */ 0, +/* 96b - */ 0, +/* 96c - _V_0F_50 */ 0x4036, +/* 96d - _V_66_0F_50 */ 0x4037, +/* 96e - */ 0, +/* 96f - */ 0, +/* 970 - */ 0, +/* 971 - */ 0, +/* 972 - */ 0, +/* 973 - */ 0, +/* 974 - _0F_51 */ 0x23a5, +/* 975 - _66_0F_51 */ 0x23a6, +/* 
976 - _F3_0F_51 */ 0x23a7, +/* 977 - _F2_0F_51 */ 0x23a8, +/* 978 - _V_0F_51 */ 0x4038, +/* 979 - _V_66_0F_51 */ 0x4039, +/* 97a - _V_F3_0F_51 */ 0x403a, +/* 97b - _V_F2_0F_51 */ 0x403b, +/* 97c - */ 0, +/* 97d - */ 0, +/* 97e - */ 0, +/* 97f - */ 0, +/* 980 - _0F_52 */ 0x23a9, +/* 981 - */ 0, +/* 982 - _F3_0F_52 */ 0x23aa, +/* 983 - */ 0, +/* 984 - _V_0F_52 */ 0x403c, +/* 985 - */ 0, +/* 986 - _V_F3_0F_52 */ 0x403d, +/* 987 - */ 0, +/* 988 - */ 0, +/* 989 - */ 0, +/* 98a - */ 0, +/* 98b - */ 0, +/* 98c - _0F_53 */ 0x23ab, +/* 98d - */ 0, +/* 98e - _F3_0F_53 */ 0x23ac, +/* 98f - */ 0, +/* 990 - _V_0F_53 */ 0x403e, +/* 991 - */ 0, +/* 992 - _V_F3_0F_53 */ 0x403f, +/* 993 - */ 0, +/* 994 - */ 0, +/* 995 - */ 0, +/* 996 - */ 0, +/* 997 - */ 0, +/* 998 - _0F_54 */ 0x23ad, +/* 999 - _66_0F_54 */ 0x23ae, +/* 99a - */ 0, +/* 99b - */ 0, +/* 99c - _V_0F_54 */ 0x4040, +/* 99d - _V_66_0F_54 */ 0x4041, +/* 99e - */ 0, +/* 99f - */ 0, +/* 9a0 - */ 0, +/* 9a1 - */ 0, +/* 9a2 - */ 0, +/* 9a3 - */ 0, +/* 9a4 - _0F_55 */ 0x23af, +/* 9a5 - _66_0F_55 */ 0x23b0, +/* 9a6 - */ 0, +/* 9a7 - */ 0, +/* 9a8 - _V_0F_55 */ 0x4042, +/* 9a9 - _V_66_0F_55 */ 0x4043, +/* 9aa - */ 0, +/* 9ab - */ 0, +/* 9ac - */ 0, +/* 9ad - */ 0, +/* 9ae - */ 0, +/* 9af - */ 0, +/* 9b0 - _0F_56 */ 0x23b1, +/* 9b1 - _66_0F_56 */ 0x23b2, +/* 9b2 - */ 0, +/* 9b3 - */ 0, +/* 9b4 - _V_0F_56 */ 0x4044, +/* 9b5 - _V_66_0F_56 */ 0x4045, +/* 9b6 - */ 0, +/* 9b7 - */ 0, +/* 9b8 - */ 0, +/* 9b9 - */ 0, +/* 9ba - */ 0, +/* 9bb - */ 0, +/* 9bc - _0F_57 */ 0x23b3, +/* 9bd - _66_0F_57 */ 0x23b4, +/* 9be - */ 0, +/* 9bf - */ 0, +/* 9c0 - _V_0F_57 */ 0x4046, +/* 9c1 - _V_66_0F_57 */ 0x4047, +/* 9c2 - */ 0, +/* 9c3 - */ 0, +/* 9c4 - */ 0, +/* 9c5 - */ 0, +/* 9c6 - */ 0, +/* 9c7 - */ 0, +/* 9c8 - _0F_58 */ 0x23b5, +/* 9c9 - _66_0F_58 */ 0x23b6, +/* 9ca - _F3_0F_58 */ 0x23b7, +/* 9cb - _F2_0F_58 */ 0x23b8, +/* 9cc - _V_0F_58 */ 0x4048, +/* 9cd - _V_66_0F_58 */ 0x4049, +/* 9ce - _V_F3_0F_58 */ 0x404a, +/* 9cf - _V_F2_0F_58 */ 
0x404b, +/* 9d0 - */ 0, +/* 9d1 - */ 0, +/* 9d2 - */ 0, +/* 9d3 - */ 0, +/* 9d4 - _0F_59 */ 0x23b9, +/* 9d5 - _66_0F_59 */ 0x23ba, +/* 9d6 - _F3_0F_59 */ 0x23bb, +/* 9d7 - _F2_0F_59 */ 0x23bc, +/* 9d8 - _V_0F_59 */ 0x404c, +/* 9d9 - _V_66_0F_59 */ 0x404d, +/* 9da - _V_F3_0F_59 */ 0x404e, +/* 9db - _V_F2_0F_59 */ 0x404f, +/* 9dc - */ 0, +/* 9dd - */ 0, +/* 9de - */ 0, +/* 9df - */ 0, +/* 9e0 - _0F_5A */ 0x23bd, +/* 9e1 - _66_0F_5A */ 0x23be, +/* 9e2 - _F3_0F_5A */ 0x23bf, +/* 9e3 - _F2_0F_5A */ 0x23c0, +/* 9e4 - _V_0F_5A */ 0x4050, +/* 9e5 - _V_66_0F_5A */ 0x4051, +/* 9e6 - _V_F3_0F_5A */ 0x4052, +/* 9e7 - _V_F2_0F_5A */ 0x4053, +/* 9e8 - */ 0, +/* 9e9 - */ 0, +/* 9ea - */ 0, +/* 9eb - */ 0, +/* 9ec - _0F_5B */ 0x23c1, +/* 9ed - _66_0F_5B */ 0x23c2, +/* 9ee - _F3_0F_5B */ 0x23c3, +/* 9ef - */ 0, +/* 9f0 - _V_0F_5B */ 0x4054, +/* 9f1 - _V_66_0F_5B */ 0x4055, +/* 9f2 - _V_F3_0F_5B */ 0x4056, +/* 9f3 - */ 0, +/* 9f4 - */ 0, +/* 9f5 - */ 0, +/* 9f6 - */ 0, +/* 9f7 - */ 0, +/* 9f8 - _0F_5C */ 0x23c4, +/* 9f9 - _66_0F_5C */ 0x23c5, +/* 9fa - _F3_0F_5C */ 0x23c6, +/* 9fb - _F2_0F_5C */ 0x23c7, +/* 9fc - _V_0F_5C */ 0x4057, +/* 9fd - _V_66_0F_5C */ 0x4058, +/* 9fe - _V_F3_0F_5C */ 0x4059, +/* 9ff - _V_F2_0F_5C */ 0x405a, +/* a00 - */ 0, +/* a01 - */ 0, +/* a02 - */ 0, +/* a03 - */ 0, +/* a04 - _0F_5D */ 0x23c8, +/* a05 - _66_0F_5D */ 0x23c9, +/* a06 - _F3_0F_5D */ 0x23ca, +/* a07 - _F2_0F_5D */ 0x23cb, +/* a08 - _V_0F_5D */ 0x405b, +/* a09 - _V_66_0F_5D */ 0x405c, +/* a0a - _V_F3_0F_5D */ 0x405d, +/* a0b - _V_F2_0F_5D */ 0x405e, +/* a0c - */ 0, +/* a0d - */ 0, +/* a0e - */ 0, +/* a0f - */ 0, +/* a10 - _0F_5E */ 0x23cc, +/* a11 - _66_0F_5E */ 0x23cd, +/* a12 - _F3_0F_5E */ 0x23ce, +/* a13 - _F2_0F_5E */ 0x23cf, +/* a14 - _V_0F_5E */ 0x405f, +/* a15 - _V_66_0F_5E */ 0x4060, +/* a16 - _V_F3_0F_5E */ 0x4061, +/* a17 - _V_F2_0F_5E */ 0x4062, +/* a18 - */ 0, +/* a19 - */ 0, +/* a1a - */ 0, +/* a1b - */ 0, +/* a1c - _0F_5F */ 0x23d0, +/* a1d - _66_0F_5F */ 0x23d1, +/* a1e - 
_F3_0F_5F */ 0x23d2, +/* a1f - _F2_0F_5F */ 0x23d3, +/* a20 - _V_0F_5F */ 0x4063, +/* a21 - _V_66_0F_5F */ 0x4064, +/* a22 - _V_F3_0F_5F */ 0x4065, +/* a23 - _V_F2_0F_5F */ 0x4066, +/* a24 - */ 0, +/* a25 - */ 0, +/* a26 - */ 0, +/* a27 - */ 0, +/* a28 - _0F_60 */ 0x23d4, +/* a29 - _66_0F_60 */ 0x23d5, +/* a2a - */ 0, +/* a2b - */ 0, +/* a2c - */ 0, +/* a2d - _V_66_0F_60 */ 0x4067, +/* a2e - */ 0, +/* a2f - */ 0, +/* a30 - */ 0, +/* a31 - */ 0, +/* a32 - */ 0, +/* a33 - */ 0, +/* a34 - _0F_61 */ 0x23d6, +/* a35 - _66_0F_61 */ 0x23d7, +/* a36 - */ 0, +/* a37 - */ 0, +/* a38 - */ 0, +/* a39 - _V_66_0F_61 */ 0x4068, +/* a3a - */ 0, +/* a3b - */ 0, +/* a3c - */ 0, +/* a3d - */ 0, +/* a3e - */ 0, +/* a3f - */ 0, +/* a40 - _0F_62 */ 0x23d8, +/* a41 - _66_0F_62 */ 0x23d9, +/* a42 - */ 0, +/* a43 - */ 0, +/* a44 - */ 0, +/* a45 - _V_66_0F_62 */ 0x4069, +/* a46 - */ 0, +/* a47 - */ 0, +/* a48 - */ 0, +/* a49 - */ 0, +/* a4a - */ 0, +/* a4b - */ 0, +/* a4c - _0F_63 */ 0x23da, +/* a4d - _66_0F_63 */ 0x23db, +/* a4e - */ 0, +/* a4f - */ 0, +/* a50 - */ 0, +/* a51 - _V_66_0F_63 */ 0x406a, +/* a52 - */ 0, +/* a53 - */ 0, +/* a54 - */ 0, +/* a55 - */ 0, +/* a56 - */ 0, +/* a57 - */ 0, +/* a58 - _0F_64 */ 0x23dc, +/* a59 - _66_0F_64 */ 0x23dd, +/* a5a - */ 0, +/* a5b - */ 0, +/* a5c - */ 0, +/* a5d - _V_66_0F_64 */ 0x406b, +/* a5e - */ 0, +/* a5f - */ 0, +/* a60 - */ 0, +/* a61 - */ 0, +/* a62 - */ 0, +/* a63 - */ 0, +/* a64 - _0F_65 */ 0x23de, +/* a65 - _66_0F_65 */ 0x23df, +/* a66 - */ 0, +/* a67 - */ 0, +/* a68 - */ 0, +/* a69 - _V_66_0F_65 */ 0x406c, +/* a6a - */ 0, +/* a6b - */ 0, +/* a6c - */ 0, +/* a6d - */ 0, +/* a6e - */ 0, +/* a6f - */ 0, +/* a70 - _0F_66 */ 0x23e0, +/* a71 - _66_0F_66 */ 0x23e1, +/* a72 - */ 0, +/* a73 - */ 0, +/* a74 - */ 0, +/* a75 - _V_66_0F_66 */ 0x406d, +/* a76 - */ 0, +/* a77 - */ 0, +/* a78 - */ 0, +/* a79 - */ 0, +/* a7a - */ 0, +/* a7b - */ 0, +/* a7c - _0F_67 */ 0x23e2, +/* a7d - _66_0F_67 */ 0x23e3, +/* a7e - */ 0, +/* a7f - */ 0, +/* a80 - 
*/ 0, +/* a81 - _V_66_0F_67 */ 0x406e, +/* a82 - */ 0, +/* a83 - */ 0, +/* a84 - */ 0, +/* a85 - */ 0, +/* a86 - */ 0, +/* a87 - */ 0, +/* a88 - _0F_68 */ 0x23e4, +/* a89 - _66_0F_68 */ 0x23e5, +/* a8a - */ 0, +/* a8b - */ 0, +/* a8c - */ 0, +/* a8d - _V_66_0F_68 */ 0x406f, +/* a8e - */ 0, +/* a8f - */ 0, +/* a90 - */ 0, +/* a91 - */ 0, +/* a92 - */ 0, +/* a93 - */ 0, +/* a94 - _0F_69 */ 0x23e6, +/* a95 - _66_0F_69 */ 0x23e7, +/* a96 - */ 0, +/* a97 - */ 0, +/* a98 - */ 0, +/* a99 - _V_66_0F_69 */ 0x4070, +/* a9a - */ 0, +/* a9b - */ 0, +/* a9c - */ 0, +/* a9d - */ 0, +/* a9e - */ 0, +/* a9f - */ 0, +/* aa0 - _0F_6A */ 0x23e8, +/* aa1 - _66_0F_6A */ 0x23e9, +/* aa2 - */ 0, +/* aa3 - */ 0, +/* aa4 - */ 0, +/* aa5 - _V_66_0F_6A */ 0x4071, +/* aa6 - */ 0, +/* aa7 - */ 0, +/* aa8 - */ 0, +/* aa9 - */ 0, +/* aaa - */ 0, +/* aab - */ 0, +/* aac - _0F_6B */ 0x23ea, +/* aad - _66_0F_6B */ 0x23eb, +/* aae - */ 0, +/* aaf - */ 0, +/* ab0 - */ 0, +/* ab1 - _V_66_0F_6B */ 0x4072, +/* ab2 - */ 0, +/* ab3 - */ 0, +/* ab4 - */ 0, +/* ab5 - */ 0, +/* ab6 - */ 0, +/* ab7 - */ 0, +/* ab8 - */ 0, +/* ab9 - _66_0F_6C */ 0x23ec, +/* aba - */ 0, +/* abb - */ 0, +/* abc - */ 0, +/* abd - _V_66_0F_6C */ 0x4073, +/* abe - */ 0, +/* abf - */ 0, +/* ac0 - */ 0, +/* ac1 - */ 0, +/* ac2 - */ 0, +/* ac3 - */ 0, +/* ac4 - */ 0, +/* ac5 - _66_0F_6D */ 0x23ed, +/* ac6 - */ 0, +/* ac7 - */ 0, +/* ac8 - */ 0, +/* ac9 - _V_66_0F_6D */ 0x4074, +/* aca - */ 0, +/* acb - */ 0, +/* acc - */ 0, +/* acd - */ 0, +/* ace - */ 0, +/* acf - */ 0, +/* ad0 - _0F_6E */ 0x4075, +/* ad1 - _66_0F_6E */ 0x4076, +/* ad2 - */ 0, +/* ad3 - */ 0, +/* ad4 - */ 0, +/* ad5 - _V_66_0F_6E */ 0x4077, +/* ad6 - */ 0, +/* ad7 - */ 0, +/* ad8 - */ 0, +/* ad9 - */ 0, +/* ada - */ 0, +/* adb - */ 0, +/* adc - _0F_6F */ 0x23ee, +/* add - _66_0F_6F */ 0x23ef, +/* ade - _F3_0F_6F */ 0x23f0, +/* adf - */ 0, +/* ae0 - */ 0, +/* ae1 - _V_66_0F_6F */ 0x4078, +/* ae2 - _V_F3_0F_6F */ 0x4079, +/* ae3 - */ 0, +/* ae4 - */ 0, +/* ae5 - */ 0, 
+/* ae6 - */ 0, +/* ae7 - */ 0, +/* ae8 - _0F_70 */ 0x407a, +/* ae9 - _66_0F_70 */ 0x407b, +/* aea - _F3_0F_70 */ 0x407c, +/* aeb - _F2_0F_70 */ 0x407d, +/* aec - */ 0, +/* aed - _V_66_0F_70 */ 0x407e, +/* aee - _V_F3_0F_70 */ 0x407f, +/* aef - _V_F2_0F_70 */ 0x4080, +/* af0 - */ 0, +/* af1 - */ 0, +/* af2 - */ 0, +/* af3 - */ 0, +/* af4 - */ 0, +/* af5 - */ 0, +/* af6 - _0F_71_02 */ 0xd584, +/* af7 - */ 0, +/* af8 - _0F_71_04 */ 0xd590, +/* af9 - */ 0, +/* afa - _0F_71_06 */ 0xd59c, +/* afb - */ 0, +/* afc - */ 0, +/* afd - */ 0, +/* afe - _0F_72_02 */ 0xd5a8, +/* aff - */ 0, +/* b00 - _0F_72_04 */ 0xd5b4, +/* b01 - */ 0, +/* b02 - _0F_72_06 */ 0xd5c0, +/* b03 - */ 0, +/* b04 - */ 0, +/* b05 - */ 0, +/* b06 - _0F_73_02 */ 0xd5cc, +/* b07 - _0F_73_03 */ 0xd5d8, +/* b08 - */ 0, +/* b09 - */ 0, +/* b0a - _0F_73_06 */ 0xd5e4, +/* b0b - _0F_73_07 */ 0xd5f0, +/* b0c - _0F_74 */ 0x23f1, +/* b0d - _66_0F_74 */ 0x23f2, +/* b0e - */ 0, +/* b0f - */ 0, +/* b10 - */ 0, +/* b11 - _V_66_0F_74 */ 0x4081, +/* b12 - */ 0, +/* b13 - */ 0, +/* b14 - */ 0, +/* b15 - */ 0, +/* b16 - */ 0, +/* b17 - */ 0, +/* b18 - _0F_75 */ 0x23f3, +/* b19 - _66_0F_75 */ 0x23f4, +/* b1a - */ 0, +/* b1b - */ 0, +/* b1c - */ 0, +/* b1d - _V_66_0F_75 */ 0x4082, +/* b1e - */ 0, +/* b1f - */ 0, +/* b20 - */ 0, +/* b21 - */ 0, +/* b22 - */ 0, +/* b23 - */ 0, +/* b24 - _0F_76 */ 0x23f5, +/* b25 - _66_0F_76 */ 0x23f6, +/* b26 - */ 0, +/* b27 - */ 0, +/* b28 - */ 0, +/* b29 - _V_66_0F_76 */ 0x4083, +/* b2a - */ 0, +/* b2b - */ 0, +/* b2c - */ 0, +/* b2d - */ 0, +/* b2e - */ 0, +/* b2f - */ 0, +/* b30 - _0F_77 */ 0x23f7, +/* b31 - */ 0, +/* b32 - */ 0, +/* b33 - */ 0, +/* b34 - _V_0F_77 */ 0x4084, +/* b35 - */ 0, +/* b36 - */ 0, +/* b37 - */ 0, +/* b38 - */ 0, +/* b39 - */ 0, +/* b3a - */ 0, +/* b3b - */ 0, +/* b3c - _0F_78 */ 0x23f8, +/* b3d - _66_0F_78 */ 0x4085, +/* b3e - */ 0, +/* b3f - _F2_0F_78 */ 0x4086, +/* b40 - */ 0, +/* b41 - */ 0, +/* b42 - */ 0, +/* b43 - */ 0, +/* b44 - */ 0, +/* b45 - */ 0, +/* 
b46 - */ 0, +/* b47 - */ 0, +/* b48 - _0F_79 */ 0x23f9, +/* b49 - _66_0F_79 */ 0x23fa, +/* b4a - */ 0, +/* b4b - _F2_0F_79 */ 0x23fb, +/* b4c - */ 0, +/* b4d - */ 0, +/* b4e - */ 0, +/* b4f - */ 0, +/* b50 - */ 0, +/* b51 - */ 0, +/* b52 - */ 0, +/* b53 - */ 0, +/* b54 - */ 0, +/* b55 - */ 0, +/* b56 - */ 0, +/* b57 - */ 0, +/* b58 - */ 0, +/* b59 - */ 0, +/* b5a - */ 0, +/* b5b - */ 0, +/* b5c - */ 0, +/* b5d - */ 0, +/* b5e - */ 0, +/* b5f - */ 0, +/* b60 - */ 0, +/* b61 - */ 0, +/* b62 - */ 0, +/* b63 - */ 0, +/* b64 - */ 0, +/* b65 - */ 0, +/* b66 - */ 0, +/* b67 - */ 0, +/* b68 - */ 0, +/* b69 - */ 0, +/* b6a - */ 0, +/* b6b - */ 0, +/* b6c - */ 0, +/* b6d - */ 0, +/* b6e - */ 0, +/* b6f - */ 0, +/* b70 - */ 0, +/* b71 - */ 0, +/* b72 - */ 0, +/* b73 - */ 0, +/* b74 - */ 0, +/* b75 - */ 0, +/* b76 - */ 0, +/* b77 - */ 0, +/* b78 - */ 0, +/* b79 - */ 0, +/* b7a - */ 0, +/* b7b - */ 0, +/* b7c - */ 0, +/* b7d - */ 0, +/* b7e - */ 0, +/* b7f - */ 0, +/* b80 - */ 0, +/* b81 - */ 0, +/* b82 - */ 0, +/* b83 - */ 0, +/* b84 - _0F_7A_30 */ 0x23fc, +/* b85 - _0F_7A_31 */ 0x23fd, +/* b86 - */ 0, +/* b87 - */ 0, +/* b88 - */ 0, +/* b89 - */ 0, +/* b8a - */ 0, +/* b8b - */ 0, +/* b8c - */ 0, +/* b8d - */ 0, +/* b8e - */ 0, +/* b8f - */ 0, +/* b90 - */ 0, +/* b91 - */ 0, +/* b92 - */ 0, +/* b93 - */ 0, +/* b94 - */ 0, +/* b95 - */ 0, +/* b96 - */ 0, +/* b97 - */ 0, +/* b98 - */ 0, +/* b99 - */ 0, +/* b9a - */ 0, +/* b9b - */ 0, +/* b9c - */ 0, +/* b9d - */ 0, +/* b9e - */ 0, +/* b9f - */ 0, +/* ba0 - */ 0, +/* ba1 - */ 0, +/* ba2 - */ 0, +/* ba3 - */ 0, +/* ba4 - */ 0, +/* ba5 - */ 0, +/* ba6 - */ 0, +/* ba7 - */ 0, +/* ba8 - */ 0, +/* ba9 - */ 0, +/* baa - */ 0, +/* bab - */ 0, +/* bac - */ 0, +/* bad - */ 0, +/* bae - */ 0, +/* baf - */ 0, +/* bb0 - */ 0, +/* bb1 - */ 0, +/* bb2 - */ 0, +/* bb3 - */ 0, +/* bb4 - */ 0, +/* bb5 - */ 0, +/* bb6 - */ 0, +/* bb7 - */ 0, +/* bb8 - */ 0, +/* bb9 - */ 0, +/* bba - */ 0, +/* bbb - */ 0, +/* bbc - */ 0, +/* bbd - */ 0, +/* bbe - 
*/ 0, +/* bbf - */ 0, +/* bc0 - */ 0, +/* bc1 - */ 0, +/* bc2 - */ 0, +/* bc3 - */ 0, +/* bc4 - */ 0, +/* bc5 - */ 0, +/* bc6 - */ 0, +/* bc7 - */ 0, +/* bc8 - */ 0, +/* bc9 - */ 0, +/* bca - */ 0, +/* bcb - */ 0, +/* bcc - */ 0, +/* bcd - */ 0, +/* bce - */ 0, +/* bcf - */ 0, +/* bd0 - */ 0, +/* bd1 - */ 0, +/* bd2 - */ 0, +/* bd3 - */ 0, +/* bd4 - */ 0, +/* bd5 - */ 0, +/* bd6 - */ 0, +/* bd7 - */ 0, +/* bd8 - */ 0, +/* bd9 - */ 0, +/* bda - */ 0, +/* bdb - */ 0, +/* bdc - */ 0, +/* bdd - */ 0, +/* bde - */ 0, +/* bdf - */ 0, +/* be0 - */ 0, +/* be1 - */ 0, +/* be2 - */ 0, +/* be3 - */ 0, +/* be4 - */ 0, +/* be5 - */ 0, +/* be6 - */ 0, +/* be7 - */ 0, +/* be8 - */ 0, +/* be9 - */ 0, +/* bea - */ 0, +/* beb - */ 0, +/* bec - */ 0, +/* bed - */ 0, +/* bee - */ 0, +/* bef - */ 0, +/* bf0 - */ 0, +/* bf1 - */ 0, +/* bf2 - */ 0, +/* bf3 - */ 0, +/* bf4 - */ 0, +/* bf5 - */ 0, +/* bf6 - */ 0, +/* bf7 - */ 0, +/* bf8 - */ 0, +/* bf9 - */ 0, +/* bfa - */ 0, +/* bfb - */ 0, +/* bfc - */ 0, +/* bfd - */ 0, +/* bfe - */ 0, +/* bff - */ 0, +/* c00 - */ 0, +/* c01 - */ 0, +/* c02 - */ 0, +/* c03 - */ 0, +/* c04 - */ 0, +/* c05 - */ 0, +/* c06 - */ 0, +/* c07 - */ 0, +/* c08 - */ 0, +/* c09 - */ 0, +/* c0a - */ 0, +/* c0b - */ 0, +/* c0c - */ 0, +/* c0d - */ 0, +/* c0e - */ 0, +/* c0f - */ 0, +/* c10 - */ 0, +/* c11 - */ 0, +/* c12 - */ 0, +/* c13 - */ 0, +/* c14 - */ 0, +/* c15 - */ 0, +/* c16 - */ 0, +/* c17 - */ 0, +/* c18 - */ 0, +/* c19 - */ 0, +/* c1a - */ 0, +/* c1b - */ 0, +/* c1c - */ 0, +/* c1d - */ 0, +/* c1e - */ 0, +/* c1f - */ 0, +/* c20 - */ 0, +/* c21 - */ 0, +/* c22 - */ 0, +/* c23 - */ 0, +/* c24 - */ 0, +/* c25 - */ 0, +/* c26 - */ 0, +/* c27 - */ 0, +/* c28 - */ 0, +/* c29 - */ 0, +/* c2a - */ 0, +/* c2b - */ 0, +/* c2c - */ 0, +/* c2d - */ 0, +/* c2e - */ 0, +/* c2f - */ 0, +/* c30 - */ 0, +/* c31 - */ 0, +/* c32 - */ 0, +/* c33 - */ 0, +/* c34 - */ 0, +/* c35 - */ 0, +/* c36 - */ 0, +/* c37 - */ 0, +/* c38 - */ 0, +/* c39 - */ 0, +/* c3a - */ 0, +/* c3b - 
*/ 0, +/* c3c - */ 0, +/* c3d - */ 0, +/* c3e - */ 0, +/* c3f - */ 0, +/* c40 - */ 0, +/* c41 - */ 0, +/* c42 - */ 0, +/* c43 - */ 0, +/* c44 - */ 0, +/* c45 - */ 0, +/* c46 - */ 0, +/* c47 - */ 0, +/* c48 - */ 0, +/* c49 - */ 0, +/* c4a - */ 0, +/* c4b - */ 0, +/* c4c - */ 0, +/* c4d - */ 0, +/* c4e - */ 0, +/* c4f - */ 0, +/* c50 - */ 0, +/* c51 - */ 0, +/* c52 - */ 0, +/* c53 - */ 0, +/* c54 - */ 0, +/* c55 - _66_0F_7C */ 0x23fe, +/* c56 - */ 0, +/* c57 - _F2_0F_7C */ 0x23ff, +/* c58 - */ 0, +/* c59 - _V_66_0F_7C */ 0x4087, +/* c5a - */ 0, +/* c5b - _V_F2_0F_7C */ 0x4088, +/* c5c - */ 0, +/* c5d - */ 0, +/* c5e - */ 0, +/* c5f - */ 0, +/* c60 - */ 0, +/* c61 - _66_0F_7D */ 0x2400, +/* c62 - */ 0, +/* c63 - _F2_0F_7D */ 0x2401, +/* c64 - */ 0, +/* c65 - _V_66_0F_7D */ 0x4089, +/* c66 - */ 0, +/* c67 - _V_F2_0F_7D */ 0x408a, +/* c68 - */ 0, +/* c69 - */ 0, +/* c6a - */ 0, +/* c6b - */ 0, +/* c6c - _0F_7E */ 0x408b, +/* c6d - _66_0F_7E */ 0x408c, +/* c6e - _F3_0F_7E */ 0x2402, +/* c6f - */ 0, +/* c70 - */ 0, +/* c71 - _V_66_0F_7E */ 0x408d, +/* c72 - _V_F3_0F_7E */ 0x408e, +/* c73 - */ 0, +/* c74 - */ 0, +/* c75 - */ 0, +/* c76 - */ 0, +/* c77 - */ 0, +/* c78 - _0F_7F */ 0x2403, +/* c79 - _66_0F_7F */ 0x2404, +/* c7a - _F3_0F_7F */ 0x2405, +/* c7b - */ 0, +/* c7c - */ 0, +/* c7d - _V_66_0F_7F */ 0x408f, +/* c7e - _V_F3_0F_7F */ 0x4090, +/* c7f - */ 0, +/* c80 - */ 0, +/* c81 - */ 0, +/* c82 - */ 0, +/* c83 - */ 0, +/* c84 - _0F_AE_00 */ 0xd5fc, +/* c85 - _0F_AE_01 */ 0xd608, +/* c86 - _0F_AE_02 */ 0xd614, +/* c87 - _0F_AE_03 */ 0xd620, +/* c88 - _0F_AE_04 */ 0x4091, +/* c89 - _0F_AE_05 */ 0x4092, +/* c8a - _0F_AE_06 */ 0x4093, +/* c8b - _0F_AE_07 */ 0x4094, +/* c8c - */ 0, +/* c8d - */ 0, +/* c8e - _F3_0F_B8 */ 0x2406, +/* c8f - */ 0, +/* c90 - */ 0, +/* c91 - */ 0, +/* c92 - */ 0, +/* c93 - */ 0, +/* c94 - */ 0, +/* c95 - */ 0, +/* c96 - */ 0, +/* c97 - */ 0, +/* c98 - */ 0, +/* c99 - */ 0, +/* c9a - */ 0, +/* c9b - */ 0, +/* c9c - _0F_BA_04 */ 0x2407, +/* c9d - 
_0F_BA_05 */ 0x2408, +/* c9e - _0F_BA_06 */ 0x2409, +/* c9f - _0F_BA_07 */ 0x240a, +/* ca0 - _0F_BC */ 0x240b, +/* ca1 - */ 0, +/* ca2 - _F3_0F_BC */ 0x240c, +/* ca3 - */ 0, +/* ca4 - */ 0, +/* ca5 - */ 0, +/* ca6 - */ 0, +/* ca7 - */ 0, +/* ca8 - */ 0, +/* ca9 - */ 0, +/* caa - */ 0, +/* cab - */ 0, +/* cac - _0F_BD */ 0x240d, +/* cad - */ 0, +/* cae - _F3_0F_BD */ 0x240e, +/* caf - */ 0, +/* cb0 - */ 0, +/* cb1 - */ 0, +/* cb2 - */ 0, +/* cb3 - */ 0, +/* cb4 - */ 0, +/* cb5 - */ 0, +/* cb6 - */ 0, +/* cb7 - */ 0, +/* cb8 - _0F_C2 */ 0x4095, +/* cb9 - _66_0F_C2 */ 0x4096, +/* cba - _F3_0F_C2 */ 0x4097, +/* cbb - _F2_0F_C2 */ 0x4098, +/* cbc - _V_0F_C2 */ 0x4099, +/* cbd - _V_66_0F_C2 */ 0x409a, +/* cbe - _V_F3_0F_C2 */ 0x409b, +/* cbf - _V_F2_0F_C2 */ 0x409c, +/* cc0 - */ 0, +/* cc1 - */ 0, +/* cc2 - */ 0, +/* cc3 - */ 0, +/* cc4 - _0F_C4 */ 0x409d, +/* cc5 - _66_0F_C4 */ 0x409e, +/* cc6 - */ 0, +/* cc7 - */ 0, +/* cc8 - */ 0, +/* cc9 - _V_66_0F_C4 */ 0x409f, +/* cca - */ 0, +/* ccb - */ 0, +/* ccc - */ 0, +/* ccd - */ 0, +/* cce - */ 0, +/* ccf - */ 0, +/* cd0 - _0F_C5 */ 0x40a0, +/* cd1 - _66_0F_C5 */ 0x40a1, +/* cd2 - */ 0, +/* cd3 - */ 0, +/* cd4 - */ 0, +/* cd5 - _V_66_0F_C5 */ 0x40a2, +/* cd6 - */ 0, +/* cd7 - */ 0, +/* cd8 - */ 0, +/* cd9 - */ 0, +/* cda - */ 0, +/* cdb - */ 0, +/* cdc - _0F_C6 */ 0x40a3, +/* cdd - _66_0F_C6 */ 0x40a4, +/* cde - */ 0, +/* cdf - */ 0, +/* ce0 - _V_0F_C6 */ 0x40a5, +/* ce1 - _V_66_0F_C6 */ 0x40a6, +/* ce2 - */ 0, +/* ce3 - */ 0, +/* ce4 - */ 0, +/* ce5 - */ 0, +/* ce6 - */ 0, +/* ce7 - */ 0, +/* ce8 - */ 0, +/* ce9 - _0F_C7_01 */ 0x40a7, +/* cea - */ 0, +/* ceb - */ 0, +/* cec - */ 0, +/* ced - */ 0, +/* cee - _0F_C7_06 */ 0xd62c, +/* cef - _0F_C7_07 */ 0x240f, +/* cf0 - */ 0, +/* cf1 - _66_0F_D0 */ 0x2410, +/* cf2 - */ 0, +/* cf3 - _F2_0F_D0 */ 0x2411, +/* cf4 - */ 0, +/* cf5 - _V_66_0F_D0 */ 0x40a8, +/* cf6 - */ 0, +/* cf7 - _V_F2_0F_D0 */ 0x40a9, +/* cf8 - */ 0, +/* cf9 - */ 0, +/* cfa - */ 0, +/* cfb - */ 0, +/* cfc - 
_0F_D1 */ 0x2412, +/* cfd - _66_0F_D1 */ 0x2413, +/* cfe - */ 0, +/* cff - */ 0, +/* d00 - */ 0, +/* d01 - _V_66_0F_D1 */ 0x40aa, +/* d02 - */ 0, +/* d03 - */ 0, +/* d04 - */ 0, +/* d05 - */ 0, +/* d06 - */ 0, +/* d07 - */ 0, +/* d08 - _0F_D2 */ 0x2414, +/* d09 - _66_0F_D2 */ 0x2415, +/* d0a - */ 0, +/* d0b - */ 0, +/* d0c - */ 0, +/* d0d - _V_66_0F_D2 */ 0x40ab, +/* d0e - */ 0, +/* d0f - */ 0, +/* d10 - */ 0, +/* d11 - */ 0, +/* d12 - */ 0, +/* d13 - */ 0, +/* d14 - _0F_D3 */ 0x2416, +/* d15 - _66_0F_D3 */ 0x2417, +/* d16 - */ 0, +/* d17 - */ 0, +/* d18 - */ 0, +/* d19 - _V_66_0F_D3 */ 0x40ac, +/* d1a - */ 0, +/* d1b - */ 0, +/* d1c - */ 0, +/* d1d - */ 0, +/* d1e - */ 0, +/* d1f - */ 0, +/* d20 - _0F_D4 */ 0x2418, +/* d21 - _66_0F_D4 */ 0x2419, +/* d22 - */ 0, +/* d23 - */ 0, +/* d24 - */ 0, +/* d25 - _V_66_0F_D4 */ 0x40ad, +/* d26 - */ 0, +/* d27 - */ 0, +/* d28 - */ 0, +/* d29 - */ 0, +/* d2a - */ 0, +/* d2b - */ 0, +/* d2c - _0F_D5 */ 0x241a, +/* d2d - _66_0F_D5 */ 0x241b, +/* d2e - */ 0, +/* d2f - */ 0, +/* d30 - */ 0, +/* d31 - _V_66_0F_D5 */ 0x40ae, +/* d32 - */ 0, +/* d33 - */ 0, +/* d34 - */ 0, +/* d35 - */ 0, +/* d36 - */ 0, +/* d37 - */ 0, +/* d38 - */ 0, +/* d39 - _66_0F_D6 */ 0x241c, +/* d3a - _F3_0F_D6 */ 0x241d, +/* d3b - _F2_0F_D6 */ 0x241e, +/* d3c - */ 0, +/* d3d - _V_66_0F_D6 */ 0x40af, +/* d3e - */ 0, +/* d3f - */ 0, +/* d40 - */ 0, +/* d41 - */ 0, +/* d42 - */ 0, +/* d43 - */ 0, +/* d44 - _0F_D7 */ 0x241f, +/* d45 - _66_0F_D7 */ 0x2420, +/* d46 - */ 0, +/* d47 - */ 0, +/* d48 - */ 0, +/* d49 - _V_66_0F_D7 */ 0x40b0, +/* d4a - */ 0, +/* d4b - */ 0, +/* d4c - */ 0, +/* d4d - */ 0, +/* d4e - */ 0, +/* d4f - */ 0, +/* d50 - _0F_D8 */ 0x2421, +/* d51 - _66_0F_D8 */ 0x2422, +/* d52 - */ 0, +/* d53 - */ 0, +/* d54 - */ 0, +/* d55 - _V_66_0F_D8 */ 0x40b1, +/* d56 - */ 0, +/* d57 - */ 0, +/* d58 - */ 0, +/* d59 - */ 0, +/* d5a - */ 0, +/* d5b - */ 0, +/* d5c - _0F_D9 */ 0x2423, +/* d5d - _66_0F_D9 */ 0x2424, +/* d5e - */ 0, +/* d5f - */ 0, +/* d60 - */ 
0, +/* d61 - _V_66_0F_D9 */ 0x40b2, +/* d62 - */ 0, +/* d63 - */ 0, +/* d64 - */ 0, +/* d65 - */ 0, +/* d66 - */ 0, +/* d67 - */ 0, +/* d68 - _0F_DA */ 0x2425, +/* d69 - _66_0F_DA */ 0x2426, +/* d6a - */ 0, +/* d6b - */ 0, +/* d6c - */ 0, +/* d6d - _V_66_0F_DA */ 0x40b3, +/* d6e - */ 0, +/* d6f - */ 0, +/* d70 - */ 0, +/* d71 - */ 0, +/* d72 - */ 0, +/* d73 - */ 0, +/* d74 - _0F_DB */ 0x2427, +/* d75 - _66_0F_DB */ 0x2428, +/* d76 - */ 0, +/* d77 - */ 0, +/* d78 - */ 0, +/* d79 - _V_66_0F_DB */ 0x40b4, +/* d7a - */ 0, +/* d7b - */ 0, +/* d7c - */ 0, +/* d7d - */ 0, +/* d7e - */ 0, +/* d7f - */ 0, +/* d80 - _0F_DC */ 0x2429, +/* d81 - _66_0F_DC */ 0x242a, +/* d82 - */ 0, +/* d83 - */ 0, +/* d84 - */ 0, +/* d85 - _V_66_0F_DC */ 0x40b5, +/* d86 - */ 0, +/* d87 - */ 0, +/* d88 - */ 0, +/* d89 - */ 0, +/* d8a - */ 0, +/* d8b - */ 0, +/* d8c - _0F_DD */ 0x242b, +/* d8d - _66_0F_DD */ 0x242c, +/* d8e - */ 0, +/* d8f - */ 0, +/* d90 - */ 0, +/* d91 - _V_66_0F_DD */ 0x40b6, +/* d92 - */ 0, +/* d93 - */ 0, +/* d94 - */ 0, +/* d95 - */ 0, +/* d96 - */ 0, +/* d97 - */ 0, +/* d98 - _0F_DE */ 0x242d, +/* d99 - _66_0F_DE */ 0x242e, +/* d9a - */ 0, +/* d9b - */ 0, +/* d9c - */ 0, +/* d9d - _V_66_0F_DE */ 0x40b7, +/* d9e - */ 0, +/* d9f - */ 0, +/* da0 - */ 0, +/* da1 - */ 0, +/* da2 - */ 0, +/* da3 - */ 0, +/* da4 - _0F_DF */ 0x242f, +/* da5 - _66_0F_DF */ 0x2430, +/* da6 - */ 0, +/* da7 - */ 0, +/* da8 - */ 0, +/* da9 - _V_66_0F_DF */ 0x40b8, +/* daa - */ 0, +/* dab - */ 0, +/* dac - */ 0, +/* dad - */ 0, +/* dae - */ 0, +/* daf - */ 0, +/* db0 - _0F_E0 */ 0x2431, +/* db1 - _66_0F_E0 */ 0x2432, +/* db2 - */ 0, +/* db3 - */ 0, +/* db4 - */ 0, +/* db5 - _V_66_0F_E0 */ 0x40b9, +/* db6 - */ 0, +/* db7 - */ 0, +/* db8 - */ 0, +/* db9 - */ 0, +/* dba - */ 0, +/* dbb - */ 0, +/* dbc - _0F_E1 */ 0x2433, +/* dbd - _66_0F_E1 */ 0x2434, +/* dbe - */ 0, +/* dbf - */ 0, +/* dc0 - */ 0, +/* dc1 - _V_66_0F_E1 */ 0x40ba, +/* dc2 - */ 0, +/* dc3 - */ 0, +/* dc4 - */ 0, +/* dc5 - */ 0, +/* dc6 - 
*/ 0, +/* dc7 - */ 0, +/* dc8 - _0F_E2 */ 0x2435, +/* dc9 - _66_0F_E2 */ 0x2436, +/* dca - */ 0, +/* dcb - */ 0, +/* dcc - */ 0, +/* dcd - _V_66_0F_E2 */ 0x40bb, +/* dce - */ 0, +/* dcf - */ 0, +/* dd0 - */ 0, +/* dd1 - */ 0, +/* dd2 - */ 0, +/* dd3 - */ 0, +/* dd4 - _0F_E3 */ 0x2437, +/* dd5 - _66_0F_E3 */ 0x2438, +/* dd6 - */ 0, +/* dd7 - */ 0, +/* dd8 - */ 0, +/* dd9 - _V_66_0F_E3 */ 0x40bc, +/* dda - */ 0, +/* ddb - */ 0, +/* ddc - */ 0, +/* ddd - */ 0, +/* dde - */ 0, +/* ddf - */ 0, +/* de0 - _0F_E4 */ 0x2439, +/* de1 - _66_0F_E4 */ 0x243a, +/* de2 - */ 0, +/* de3 - */ 0, +/* de4 - */ 0, +/* de5 - _V_66_0F_E4 */ 0x40bd, +/* de6 - */ 0, +/* de7 - */ 0, +/* de8 - */ 0, +/* de9 - */ 0, +/* dea - */ 0, +/* deb - */ 0, +/* dec - _0F_E5 */ 0x243b, +/* ded - _66_0F_E5 */ 0x243c, +/* dee - */ 0, +/* def - */ 0, +/* df0 - */ 0, +/* df1 - _V_66_0F_E5 */ 0x40be, +/* df2 - */ 0, +/* df3 - */ 0, +/* df4 - */ 0, +/* df5 - */ 0, +/* df6 - */ 0, +/* df7 - */ 0, +/* df8 - */ 0, +/* df9 - _66_0F_E6 */ 0x243d, +/* dfa - _F3_0F_E6 */ 0x243e, +/* dfb - _F2_0F_E6 */ 0x243f, +/* dfc - */ 0, +/* dfd - _V_66_0F_E6 */ 0x40bf, +/* dfe - _V_F3_0F_E6 */ 0x40c0, +/* dff - _V_F2_0F_E6 */ 0x40c1, +/* e00 - */ 0, +/* e01 - */ 0, +/* e02 - */ 0, +/* e03 - */ 0, +/* e04 - _0F_E7 */ 0x2440, +/* e05 - _66_0F_E7 */ 0x2441, +/* e06 - */ 0, +/* e07 - */ 0, +/* e08 - */ 0, +/* e09 - _V_66_0F_E7 */ 0x40c2, +/* e0a - */ 0, +/* e0b - */ 0, +/* e0c - */ 0, +/* e0d - */ 0, +/* e0e - */ 0, +/* e0f - */ 0, +/* e10 - _0F_E8 */ 0x2442, +/* e11 - _66_0F_E8 */ 0x2443, +/* e12 - */ 0, +/* e13 - */ 0, +/* e14 - */ 0, +/* e15 - _V_66_0F_E8 */ 0x40c3, +/* e16 - */ 0, +/* e17 - */ 0, +/* e18 - */ 0, +/* e19 - */ 0, +/* e1a - */ 0, +/* e1b - */ 0, +/* e1c - _0F_E9 */ 0x2444, +/* e1d - _66_0F_E9 */ 0x2445, +/* e1e - */ 0, +/* e1f - */ 0, +/* e20 - */ 0, +/* e21 - _V_66_0F_E9 */ 0x40c4, +/* e22 - */ 0, +/* e23 - */ 0, +/* e24 - */ 0, +/* e25 - */ 0, +/* e26 - */ 0, +/* e27 - */ 0, +/* e28 - _0F_EA */ 0x2446, +/* e29 - 
_66_0F_EA */ 0x2447, +/* e2a - */ 0, +/* e2b - */ 0, +/* e2c - */ 0, +/* e2d - _V_66_0F_EA */ 0x40c5, +/* e2e - */ 0, +/* e2f - */ 0, +/* e30 - */ 0, +/* e31 - */ 0, +/* e32 - */ 0, +/* e33 - */ 0, +/* e34 - _0F_EB */ 0x2448, +/* e35 - _66_0F_EB */ 0x2449, +/* e36 - */ 0, +/* e37 - */ 0, +/* e38 - */ 0, +/* e39 - _V_66_0F_EB */ 0x40c6, +/* e3a - */ 0, +/* e3b - */ 0, +/* e3c - */ 0, +/* e3d - */ 0, +/* e3e - */ 0, +/* e3f - */ 0, +/* e40 - _0F_EC */ 0x244a, +/* e41 - _66_0F_EC */ 0x244b, +/* e42 - */ 0, +/* e43 - */ 0, +/* e44 - */ 0, +/* e45 - _V_66_0F_EC */ 0x40c7, +/* e46 - */ 0, +/* e47 - */ 0, +/* e48 - */ 0, +/* e49 - */ 0, +/* e4a - */ 0, +/* e4b - */ 0, +/* e4c - _0F_ED */ 0x244c, +/* e4d - _66_0F_ED */ 0x244d, +/* e4e - */ 0, +/* e4f - */ 0, +/* e50 - */ 0, +/* e51 - _V_66_0F_ED */ 0x40c8, +/* e52 - */ 0, +/* e53 - */ 0, +/* e54 - */ 0, +/* e55 - */ 0, +/* e56 - */ 0, +/* e57 - */ 0, +/* e58 - _0F_EE */ 0x244e, +/* e59 - _66_0F_EE */ 0x244f, +/* e5a - */ 0, +/* e5b - */ 0, +/* e5c - */ 0, +/* e5d - _V_66_0F_EE */ 0x40c9, +/* e5e - */ 0, +/* e5f - */ 0, +/* e60 - */ 0, +/* e61 - */ 0, +/* e62 - */ 0, +/* e63 - */ 0, +/* e64 - _0F_EF */ 0x2450, +/* e65 - _66_0F_EF */ 0x2451, +/* e66 - */ 0, +/* e67 - */ 0, +/* e68 - */ 0, +/* e69 - _V_66_0F_EF */ 0x40ca, +/* e6a - */ 0, +/* e6b - */ 0, +/* e6c - */ 0, +/* e6d - */ 0, +/* e6e - */ 0, +/* e6f - */ 0, +/* e70 - */ 0, +/* e71 - */ 0, +/* e72 - */ 0, +/* e73 - _F2_0F_F0 */ 0x2452, +/* e74 - */ 0, +/* e75 - */ 0, +/* e76 - */ 0, +/* e77 - _V_F2_0F_F0 */ 0x40cb, +/* e78 - */ 0, +/* e79 - */ 0, +/* e7a - */ 0, +/* e7b - */ 0, +/* e7c - _0F_F1 */ 0x2453, +/* e7d - _66_0F_F1 */ 0x2454, +/* e7e - */ 0, +/* e7f - */ 0, +/* e80 - */ 0, +/* e81 - _V_66_0F_F1 */ 0x40cc, +/* e82 - */ 0, +/* e83 - */ 0, +/* e84 - */ 0, +/* e85 - */ 0, +/* e86 - */ 0, +/* e87 - */ 0, +/* e88 - _0F_F2 */ 0x2455, +/* e89 - _66_0F_F2 */ 0x2456, +/* e8a - */ 0, +/* e8b - */ 0, +/* e8c - */ 0, +/* e8d - _V_66_0F_F2 */ 0x40cd, +/* e8e - */ 0, +/* 
e8f - */ 0, +/* e90 - */ 0, +/* e91 - */ 0, +/* e92 - */ 0, +/* e93 - */ 0, +/* e94 - _0F_F3 */ 0x2457, +/* e95 - _66_0F_F3 */ 0x2458, +/* e96 - */ 0, +/* e97 - */ 0, +/* e98 - */ 0, +/* e99 - _V_66_0F_F3 */ 0x40ce, +/* e9a - */ 0, +/* e9b - */ 0, +/* e9c - */ 0, +/* e9d - */ 0, +/* e9e - */ 0, +/* e9f - */ 0, +/* ea0 - _0F_F4 */ 0x2459, +/* ea1 - _66_0F_F4 */ 0x245a, +/* ea2 - */ 0, +/* ea3 - */ 0, +/* ea4 - */ 0, +/* ea5 - _V_66_0F_F4 */ 0x40cf, +/* ea6 - */ 0, +/* ea7 - */ 0, +/* ea8 - */ 0, +/* ea9 - */ 0, +/* eaa - */ 0, +/* eab - */ 0, +/* eac - _0F_F5 */ 0x245b, +/* ead - _66_0F_F5 */ 0x245c, +/* eae - */ 0, +/* eaf - */ 0, +/* eb0 - */ 0, +/* eb1 - _V_66_0F_F5 */ 0x40d0, +/* eb2 - */ 0, +/* eb3 - */ 0, +/* eb4 - */ 0, +/* eb5 - */ 0, +/* eb6 - */ 0, +/* eb7 - */ 0, +/* eb8 - _0F_F6 */ 0x245d, +/* eb9 - _66_0F_F6 */ 0x245e, +/* eba - */ 0, +/* ebb - */ 0, +/* ebc - */ 0, +/* ebd - _V_66_0F_F6 */ 0x40d1, +/* ebe - */ 0, +/* ebf - */ 0, +/* ec0 - */ 0, +/* ec1 - */ 0, +/* ec2 - */ 0, +/* ec3 - */ 0, +/* ec4 - _0F_F7 */ 0x245f, +/* ec5 - _66_0F_F7 */ 0x2460, +/* ec6 - */ 0, +/* ec7 - */ 0, +/* ec8 - */ 0, +/* ec9 - _V_66_0F_F7 */ 0x40d2, +/* eca - */ 0, +/* ecb - */ 0, +/* ecc - */ 0, +/* ecd - */ 0, +/* ece - */ 0, +/* ecf - */ 0, +/* ed0 - _0F_F8 */ 0x2461, +/* ed1 - _66_0F_F8 */ 0x2462, +/* ed2 - */ 0, +/* ed3 - */ 0, +/* ed4 - */ 0, +/* ed5 - _V_66_0F_F8 */ 0x40d3, +/* ed6 - */ 0, +/* ed7 - */ 0, +/* ed8 - */ 0, +/* ed9 - */ 0, +/* eda - */ 0, +/* edb - */ 0, +/* edc - _0F_F9 */ 0x2463, +/* edd - _66_0F_F9 */ 0x2464, +/* ede - */ 0, +/* edf - */ 0, +/* ee0 - */ 0, +/* ee1 - _V_66_0F_F9 */ 0x40d4, +/* ee2 - */ 0, +/* ee3 - */ 0, +/* ee4 - */ 0, +/* ee5 - */ 0, +/* ee6 - */ 0, +/* ee7 - */ 0, +/* ee8 - _0F_FA */ 0x2465, +/* ee9 - _66_0F_FA */ 0x2466, +/* eea - */ 0, +/* eeb - */ 0, +/* eec - */ 0, +/* eed - _V_66_0F_FA */ 0x40d5, +/* eee - */ 0, +/* eef - */ 0, +/* ef0 - */ 0, +/* ef1 - */ 0, +/* ef2 - */ 0, +/* ef3 - */ 0, +/* ef4 - _0F_FB */ 0x2467, +/* ef5 
- _66_0F_FB */ 0x2468, +/* ef6 - */ 0, +/* ef7 - */ 0, +/* ef8 - */ 0, +/* ef9 - _V_66_0F_FB */ 0x40d6, +/* efa - */ 0, +/* efb - */ 0, +/* efc - */ 0, +/* efd - */ 0, +/* efe - */ 0, +/* eff - */ 0, +/* f00 - _0F_FC */ 0x2469, +/* f01 - _66_0F_FC */ 0x246a, +/* f02 - */ 0, +/* f03 - */ 0, +/* f04 - */ 0, +/* f05 - _V_66_0F_FC */ 0x40d7, +/* f06 - */ 0, +/* f07 - */ 0, +/* f08 - */ 0, +/* f09 - */ 0, +/* f0a - */ 0, +/* f0b - */ 0, +/* f0c - _0F_FD */ 0x246b, +/* f0d - _66_0F_FD */ 0x246c, +/* f0e - */ 0, +/* f0f - */ 0, +/* f10 - */ 0, +/* f11 - _V_66_0F_FD */ 0x40d8, +/* f12 - */ 0, +/* f13 - */ 0, +/* f14 - */ 0, +/* f15 - */ 0, +/* f16 - */ 0, +/* f17 - */ 0, +/* f18 - _0F_FE */ 0x246d, +/* f19 - _66_0F_FE */ 0x246e, +/* f1a - */ 0, +/* f1b - */ 0, +/* f1c - */ 0, +/* f1d - _V_66_0F_FE */ 0x40d9, +/* f1e - */ 0, +/* f1f - */ 0, +/* f20 - */ 0, +/* f21 - */ 0, +/* f22 - */ 0, +/* f23 - */ 0, +/* f24 - _D9_06 */ 0x246f, +/* f25 - _9B_D9_06 */ 0x2470, +/* f26 - */ 0, +/* f27 - */ 0, +/* f28 - */ 0, +/* f29 - */ 0, +/* f2a - */ 0, +/* f2b - */ 0, +/* f2c - */ 0, +/* f2d - */ 0, +/* f2e - */ 0, +/* f2f - */ 0, +/* f30 - _D9_07 */ 0x2471, +/* f31 - _9B_D9_07 */ 0x2472, +/* f32 - */ 0, +/* f33 - */ 0, +/* f34 - */ 0, +/* f35 - */ 0, +/* f36 - */ 0, +/* f37 - */ 0, +/* f38 - */ 0, +/* f39 - */ 0, +/* f3a - */ 0, +/* f3b - */ 0, +/* f3c - _DB_E2 */ 0x2473, +/* f3d - _9B_DB_E2 */ 0x2474, +/* f3e - */ 0, +/* f3f - */ 0, +/* f40 - */ 0, +/* f41 - */ 0, +/* f42 - */ 0, +/* f43 - */ 0, +/* f44 - */ 0, +/* f45 - */ 0, +/* f46 - */ 0, +/* f47 - */ 0, +/* f48 - _DB_E3 */ 0x2475, +/* f49 - _9B_DB_E3 */ 0x2476, +/* f4a - */ 0, +/* f4b - */ 0, +/* f4c - */ 0, +/* f4d - */ 0, +/* f4e - */ 0, +/* f4f - */ 0, +/* f50 - */ 0, +/* f51 - */ 0, +/* f52 - */ 0, +/* f53 - */ 0, +/* f54 - _DD_06 */ 0x2477, +/* f55 - _9B_DD_06 */ 0x2478, +/* f56 - */ 0, +/* f57 - */ 0, +/* f58 - */ 0, +/* f59 - */ 0, +/* f5a - */ 0, +/* f5b - */ 0, +/* f5c - */ 0, +/* f5d - */ 0, +/* f5e - */ 0, +/* f5f - */ 
0, +/* f60 - _DD_07 */ 0x2479, +/* f61 - _9B_DD_07 */ 0x247a, +/* f62 - */ 0, +/* f63 - */ 0, +/* f64 - */ 0, +/* f65 - */ 0, +/* f66 - */ 0, +/* f67 - */ 0, +/* f68 - */ 0, +/* f69 - */ 0, +/* f6a - */ 0, +/* f6b - */ 0, +/* f6c - _DF_E0 */ 0x247b, +/* f6d - _9B_DF_E0 */ 0x247c, +/* f6e - */ 0, +/* f6f - */ 0, +/* f70 - */ 0, +/* f71 - */ 0, +/* f72 - */ 0, +/* f73 - */ 0, +/* f74 - */ 0, +/* f75 - */ 0, +/* f76 - */ 0, +/* f77 - */ 0, +/* f78 - _0F_38_00 */ 0x247d, +/* f79 - _66_0F_38_00 */ 0x247e, +/* f7a - */ 0, +/* f7b - */ 0, +/* f7c - */ 0, +/* f7d - _V_66_0F_38_00 */ 0x40da, +/* f7e - */ 0, +/* f7f - */ 0, +/* f80 - */ 0, +/* f81 - */ 0, +/* f82 - */ 0, +/* f83 - */ 0, +/* f84 - _0F_38_01 */ 0x247f, +/* f85 - _66_0F_38_01 */ 0x2480, +/* f86 - */ 0, +/* f87 - */ 0, +/* f88 - */ 0, +/* f89 - _V_66_0F_38_01 */ 0x40db, +/* f8a - */ 0, +/* f8b - */ 0, +/* f8c - */ 0, +/* f8d - */ 0, +/* f8e - */ 0, +/* f8f - */ 0, +/* f90 - _0F_38_02 */ 0x2481, +/* f91 - _66_0F_38_02 */ 0x2482, +/* f92 - */ 0, +/* f93 - */ 0, +/* f94 - */ 0, +/* f95 - _V_66_0F_38_02 */ 0x40dc, +/* f96 - */ 0, +/* f97 - */ 0, +/* f98 - */ 0, +/* f99 - */ 0, +/* f9a - */ 0, +/* f9b - */ 0, +/* f9c - _0F_38_03 */ 0x2483, +/* f9d - _66_0F_38_03 */ 0x2484, +/* f9e - */ 0, +/* f9f - */ 0, +/* fa0 - */ 0, +/* fa1 - _V_66_0F_38_03 */ 0x40dd, +/* fa2 - */ 0, +/* fa3 - */ 0, +/* fa4 - */ 0, +/* fa5 - */ 0, +/* fa6 - */ 0, +/* fa7 - */ 0, +/* fa8 - _0F_38_04 */ 0x2485, +/* fa9 - _66_0F_38_04 */ 0x2486, +/* faa - */ 0, +/* fab - */ 0, +/* fac - */ 0, +/* fad - _V_66_0F_38_04 */ 0x40de, +/* fae - */ 0, +/* faf - */ 0, +/* fb0 - */ 0, +/* fb1 - */ 0, +/* fb2 - */ 0, +/* fb3 - */ 0, +/* fb4 - _0F_38_05 */ 0x2487, +/* fb5 - _66_0F_38_05 */ 0x2488, +/* fb6 - */ 0, +/* fb7 - */ 0, +/* fb8 - */ 0, +/* fb9 - _V_66_0F_38_05 */ 0x40df, +/* fba - */ 0, +/* fbb - */ 0, +/* fbc - */ 0, +/* fbd - */ 0, +/* fbe - */ 0, +/* fbf - */ 0, +/* fc0 - _0F_38_06 */ 0x2489, +/* fc1 - _66_0F_38_06 */ 0x248a, +/* fc2 - */ 0, +/* fc3 
- */ 0, +/* fc4 - */ 0, +/* fc5 - _V_66_0F_38_06 */ 0x40e0, +/* fc6 - */ 0, +/* fc7 - */ 0, +/* fc8 - */ 0, +/* fc9 - */ 0, +/* fca - */ 0, +/* fcb - */ 0, +/* fcc - _0F_38_07 */ 0x248b, +/* fcd - _66_0F_38_07 */ 0x248c, +/* fce - */ 0, +/* fcf - */ 0, +/* fd0 - */ 0, +/* fd1 - _V_66_0F_38_07 */ 0x40e1, +/* fd2 - */ 0, +/* fd3 - */ 0, +/* fd4 - */ 0, +/* fd5 - */ 0, +/* fd6 - */ 0, +/* fd7 - */ 0, +/* fd8 - _0F_38_08 */ 0x248d, +/* fd9 - _66_0F_38_08 */ 0x248e, +/* fda - */ 0, +/* fdb - */ 0, +/* fdc - */ 0, +/* fdd - _V_66_0F_38_08 */ 0x40e2, +/* fde - */ 0, +/* fdf - */ 0, +/* fe0 - */ 0, +/* fe1 - */ 0, +/* fe2 - */ 0, +/* fe3 - */ 0, +/* fe4 - _0F_38_09 */ 0x248f, +/* fe5 - _66_0F_38_09 */ 0x2490, +/* fe6 - */ 0, +/* fe7 - */ 0, +/* fe8 - */ 0, +/* fe9 - _V_66_0F_38_09 */ 0x40e3, +/* fea - */ 0, +/* feb - */ 0, +/* fec - */ 0, +/* fed - */ 0, +/* fee - */ 0, +/* fef - */ 0, +/* ff0 - _0F_38_0A */ 0x2491, +/* ff1 - _66_0F_38_0A */ 0x2492, +/* ff2 - */ 0, +/* ff3 - */ 0, +/* ff4 - */ 0, +/* ff5 - _V_66_0F_38_0A */ 0x40e4, +/* ff6 - */ 0, +/* ff7 - */ 0, +/* ff8 - */ 0, +/* ff9 - */ 0, +/* ffa - */ 0, +/* ffb - */ 0, +/* ffc - _0F_38_0B */ 0x2493, +/* ffd - _66_0F_38_0B */ 0x2494, +/* ffe - */ 0, +/* fff - */ 0, +/* 1000 - */ 0, +/* 1001 - _V_66_0F_38_0B */ 0x40e5, +/* 1002 - */ 0, +/* 1003 - */ 0, +/* 1004 - */ 0, +/* 1005 - */ 0, +/* 1006 - */ 0, +/* 1007 - */ 0, +/* 1008 - */ 0, +/* 1009 - */ 0, +/* 100a - */ 0, +/* 100b - */ 0, +/* 100c - */ 0, +/* 100d - _V_66_0F_38_0C */ 0x40e6, +/* 100e - */ 0, +/* 100f - */ 0, +/* 1010 - */ 0, +/* 1011 - */ 0, +/* 1012 - */ 0, +/* 1013 - */ 0, +/* 1014 - */ 0, +/* 1015 - */ 0, +/* 1016 - */ 0, +/* 1017 - */ 0, +/* 1018 - */ 0, +/* 1019 - _V_66_0F_38_0D */ 0x40e7, +/* 101a - */ 0, +/* 101b - */ 0, +/* 101c - */ 0, +/* 101d - */ 0, +/* 101e - */ 0, +/* 101f - */ 0, +/* 1020 - */ 0, +/* 1021 - */ 0, +/* 1022 - */ 0, +/* 1023 - */ 0, +/* 1024 - */ 0, +/* 1025 - _V_66_0F_38_0E */ 0x40e8, +/* 1026 - */ 0, +/* 1027 - */ 0, +/* 
1028 - */ 0, +/* 1029 - */ 0, +/* 102a - */ 0, +/* 102b - */ 0, +/* 102c - */ 0, +/* 102d - */ 0, +/* 102e - */ 0, +/* 102f - */ 0, +/* 1030 - */ 0, +/* 1031 - _V_66_0F_38_0F */ 0x40e9, +/* 1032 - */ 0, +/* 1033 - */ 0, +/* 1034 - */ 0, +/* 1035 - */ 0, +/* 1036 - */ 0, +/* 1037 - */ 0, +/* 1038 - */ 0, +/* 1039 - _66_0F_38_10 */ 0x40ea, +/* 103a - */ 0, +/* 103b - */ 0, +/* 103c - */ 0, +/* 103d - */ 0, +/* 103e - */ 0, +/* 103f - */ 0, +/* 1040 - */ 0, +/* 1041 - */ 0, +/* 1042 - */ 0, +/* 1043 - */ 0, +/* 1044 - */ 0, +/* 1045 - _66_0F_38_14 */ 0x40eb, +/* 1046 - */ 0, +/* 1047 - */ 0, +/* 1048 - */ 0, +/* 1049 - */ 0, +/* 104a - */ 0, +/* 104b - */ 0, +/* 104c - */ 0, +/* 104d - */ 0, +/* 104e - */ 0, +/* 104f - */ 0, +/* 1050 - */ 0, +/* 1051 - _66_0F_38_15 */ 0x40ec, +/* 1052 - */ 0, +/* 1053 - */ 0, +/* 1054 - */ 0, +/* 1055 - */ 0, +/* 1056 - */ 0, +/* 1057 - */ 0, +/* 1058 - */ 0, +/* 1059 - */ 0, +/* 105a - */ 0, +/* 105b - */ 0, +/* 105c - */ 0, +/* 105d - _66_0F_38_17 */ 0x2495, +/* 105e - */ 0, +/* 105f - */ 0, +/* 1060 - */ 0, +/* 1061 - _V_66_0F_38_17 */ 0x40ed, +/* 1062 - */ 0, +/* 1063 - */ 0, +/* 1064 - */ 0, +/* 1065 - */ 0, +/* 1066 - */ 0, +/* 1067 - */ 0, +/* 1068 - */ 0, +/* 1069 - */ 0, +/* 106a - */ 0, +/* 106b - */ 0, +/* 106c - */ 0, +/* 106d - _V_66_0F_38_18 */ 0x40ee, +/* 106e - */ 0, +/* 106f - */ 0, +/* 1070 - */ 0, +/* 1071 - */ 0, +/* 1072 - */ 0, +/* 1073 - */ 0, +/* 1074 - */ 0, +/* 1075 - */ 0, +/* 1076 - */ 0, +/* 1077 - */ 0, +/* 1078 - */ 0, +/* 1079 - _V_66_0F_38_19 */ 0x40ef, +/* 107a - */ 0, +/* 107b - */ 0, +/* 107c - */ 0, +/* 107d - */ 0, +/* 107e - */ 0, +/* 107f - */ 0, +/* 1080 - */ 0, +/* 1081 - */ 0, +/* 1082 - */ 0, +/* 1083 - */ 0, +/* 1084 - */ 0, +/* 1085 - _V_66_0F_38_1A */ 0x40f0, +/* 1086 - */ 0, +/* 1087 - */ 0, +/* 1088 - */ 0, +/* 1089 - */ 0, +/* 108a - */ 0, +/* 108b - */ 0, +/* 108c - _0F_38_1C */ 0x2496, +/* 108d - _66_0F_38_1C */ 0x2497, +/* 108e - */ 0, +/* 108f - */ 0, +/* 1090 - */ 0, +/* 1091 - 
_V_66_0F_38_1C */ 0x40f1, +/* 1092 - */ 0, +/* 1093 - */ 0, +/* 1094 - */ 0, +/* 1095 - */ 0, +/* 1096 - */ 0, +/* 1097 - */ 0, +/* 1098 - _0F_38_1D */ 0x2498, +/* 1099 - _66_0F_38_1D */ 0x2499, +/* 109a - */ 0, +/* 109b - */ 0, +/* 109c - */ 0, +/* 109d - _V_66_0F_38_1D */ 0x40f2, +/* 109e - */ 0, +/* 109f - */ 0, +/* 10a0 - */ 0, +/* 10a1 - */ 0, +/* 10a2 - */ 0, +/* 10a3 - */ 0, +/* 10a4 - _0F_38_1E */ 0x249a, +/* 10a5 - _66_0F_38_1E */ 0x249b, +/* 10a6 - */ 0, +/* 10a7 - */ 0, +/* 10a8 - */ 0, +/* 10a9 - _V_66_0F_38_1E */ 0x40f3, +/* 10aa - */ 0, +/* 10ab - */ 0, +/* 10ac - */ 0, +/* 10ad - */ 0, +/* 10ae - */ 0, +/* 10af - */ 0, +/* 10b0 - */ 0, +/* 10b1 - _66_0F_38_20 */ 0x249c, +/* 10b2 - */ 0, +/* 10b3 - */ 0, +/* 10b4 - */ 0, +/* 10b5 - _V_66_0F_38_20 */ 0x40f4, +/* 10b6 - */ 0, +/* 10b7 - */ 0, +/* 10b8 - */ 0, +/* 10b9 - */ 0, +/* 10ba - */ 0, +/* 10bb - */ 0, +/* 10bc - */ 0, +/* 10bd - _66_0F_38_21 */ 0x249d, +/* 10be - */ 0, +/* 10bf - */ 0, +/* 10c0 - */ 0, +/* 10c1 - _V_66_0F_38_21 */ 0x40f5, +/* 10c2 - */ 0, +/* 10c3 - */ 0, +/* 10c4 - */ 0, +/* 10c5 - */ 0, +/* 10c6 - */ 0, +/* 10c7 - */ 0, +/* 10c8 - */ 0, +/* 10c9 - _66_0F_38_22 */ 0x249e, +/* 10ca - */ 0, +/* 10cb - */ 0, +/* 10cc - */ 0, +/* 10cd - _V_66_0F_38_22 */ 0x40f6, +/* 10ce - */ 0, +/* 10cf - */ 0, +/* 10d0 - */ 0, +/* 10d1 - */ 0, +/* 10d2 - */ 0, +/* 10d3 - */ 0, +/* 10d4 - */ 0, +/* 10d5 - _66_0F_38_23 */ 0x249f, +/* 10d6 - */ 0, +/* 10d7 - */ 0, +/* 10d8 - */ 0, +/* 10d9 - _V_66_0F_38_23 */ 0x40f7, +/* 10da - */ 0, +/* 10db - */ 0, +/* 10dc - */ 0, +/* 10dd - */ 0, +/* 10de - */ 0, +/* 10df - */ 0, +/* 10e0 - */ 0, +/* 10e1 - _66_0F_38_24 */ 0x24a0, +/* 10e2 - */ 0, +/* 10e3 - */ 0, +/* 10e4 - */ 0, +/* 10e5 - _V_66_0F_38_24 */ 0x40f8, +/* 10e6 - */ 0, +/* 10e7 - */ 0, +/* 10e8 - */ 0, +/* 10e9 - */ 0, +/* 10ea - */ 0, +/* 10eb - */ 0, +/* 10ec - */ 0, +/* 10ed - _66_0F_38_25 */ 0x24a1, +/* 10ee - */ 0, +/* 10ef - */ 0, +/* 10f0 - */ 0, +/* 10f1 - _V_66_0F_38_25 */ 0x40f9, +/* 
10f2 - */ 0, +/* 10f3 - */ 0, +/* 10f4 - */ 0, +/* 10f5 - */ 0, +/* 10f6 - */ 0, +/* 10f7 - */ 0, +/* 10f8 - */ 0, +/* 10f9 - _66_0F_38_28 */ 0x24a2, +/* 10fa - */ 0, +/* 10fb - */ 0, +/* 10fc - */ 0, +/* 10fd - _V_66_0F_38_28 */ 0x40fa, +/* 10fe - */ 0, +/* 10ff - */ 0, +/* 1100 - */ 0, +/* 1101 - */ 0, +/* 1102 - */ 0, +/* 1103 - */ 0, +/* 1104 - */ 0, +/* 1105 - _66_0F_38_29 */ 0x24a3, +/* 1106 - */ 0, +/* 1107 - */ 0, +/* 1108 - */ 0, +/* 1109 - _V_66_0F_38_29 */ 0x40fb, +/* 110a - */ 0, +/* 110b - */ 0, +/* 110c - */ 0, +/* 110d - */ 0, +/* 110e - */ 0, +/* 110f - */ 0, +/* 1110 - */ 0, +/* 1111 - _66_0F_38_2A */ 0x24a4, +/* 1112 - */ 0, +/* 1113 - */ 0, +/* 1114 - */ 0, +/* 1115 - _V_66_0F_38_2A */ 0x40fc, +/* 1116 - */ 0, +/* 1117 - */ 0, +/* 1118 - */ 0, +/* 1119 - */ 0, +/* 111a - */ 0, +/* 111b - */ 0, +/* 111c - */ 0, +/* 111d - _66_0F_38_2B */ 0x24a5, +/* 111e - */ 0, +/* 111f - */ 0, +/* 1120 - */ 0, +/* 1121 - _V_66_0F_38_2B */ 0x40fd, +/* 1122 - */ 0, +/* 1123 - */ 0, +/* 1124 - */ 0, +/* 1125 - */ 0, +/* 1126 - */ 0, +/* 1127 - */ 0, +/* 1128 - */ 0, +/* 1129 - */ 0, +/* 112a - */ 0, +/* 112b - */ 0, +/* 112c - */ 0, +/* 112d - _V_66_0F_38_2C */ 0x40fe, +/* 112e - */ 0, +/* 112f - */ 0, +/* 1130 - */ 0, +/* 1131 - */ 0, +/* 1132 - */ 0, +/* 1133 - */ 0, +/* 1134 - */ 0, +/* 1135 - */ 0, +/* 1136 - */ 0, +/* 1137 - */ 0, +/* 1138 - */ 0, +/* 1139 - _V_66_0F_38_2D */ 0x40ff, +/* 113a - */ 0, +/* 113b - */ 0, +/* 113c - */ 0, +/* 113d - */ 0, +/* 113e - */ 0, +/* 113f - */ 0, +/* 1140 - */ 0, +/* 1141 - */ 0, +/* 1142 - */ 0, +/* 1143 - */ 0, +/* 1144 - */ 0, +/* 1145 - _V_66_0F_38_2E */ 0x4100, +/* 1146 - */ 0, +/* 1147 - */ 0, +/* 1148 - */ 0, +/* 1149 - */ 0, +/* 114a - */ 0, +/* 114b - */ 0, +/* 114c - */ 0, +/* 114d - */ 0, +/* 114e - */ 0, +/* 114f - */ 0, +/* 1150 - */ 0, +/* 1151 - _V_66_0F_38_2F */ 0x4101, +/* 1152 - */ 0, +/* 1153 - */ 0, +/* 1154 - */ 0, +/* 1155 - */ 0, +/* 1156 - */ 0, +/* 1157 - */ 0, +/* 1158 - */ 0, +/* 1159 - 
_66_0F_38_30 */ 0x24a6, +/* 115a - */ 0, +/* 115b - */ 0, +/* 115c - */ 0, +/* 115d - _V_66_0F_38_30 */ 0x4102, +/* 115e - */ 0, +/* 115f - */ 0, +/* 1160 - */ 0, +/* 1161 - */ 0, +/* 1162 - */ 0, +/* 1163 - */ 0, +/* 1164 - */ 0, +/* 1165 - _66_0F_38_31 */ 0x24a7, +/* 1166 - */ 0, +/* 1167 - */ 0, +/* 1168 - */ 0, +/* 1169 - _V_66_0F_38_31 */ 0x4103, +/* 116a - */ 0, +/* 116b - */ 0, +/* 116c - */ 0, +/* 116d - */ 0, +/* 116e - */ 0, +/* 116f - */ 0, +/* 1170 - */ 0, +/* 1171 - _66_0F_38_32 */ 0x24a8, +/* 1172 - */ 0, +/* 1173 - */ 0, +/* 1174 - */ 0, +/* 1175 - _V_66_0F_38_32 */ 0x4104, +/* 1176 - */ 0, +/* 1177 - */ 0, +/* 1178 - */ 0, +/* 1179 - */ 0, +/* 117a - */ 0, +/* 117b - */ 0, +/* 117c - */ 0, +/* 117d - _66_0F_38_33 */ 0x24a9, +/* 117e - */ 0, +/* 117f - */ 0, +/* 1180 - */ 0, +/* 1181 - _V_66_0F_38_33 */ 0x4105, +/* 1182 - */ 0, +/* 1183 - */ 0, +/* 1184 - */ 0, +/* 1185 - */ 0, +/* 1186 - */ 0, +/* 1187 - */ 0, +/* 1188 - */ 0, +/* 1189 - _66_0F_38_34 */ 0x24aa, +/* 118a - */ 0, +/* 118b - */ 0, +/* 118c - */ 0, +/* 118d - _V_66_0F_38_34 */ 0x4106, +/* 118e - */ 0, +/* 118f - */ 0, +/* 1190 - */ 0, +/* 1191 - */ 0, +/* 1192 - */ 0, +/* 1193 - */ 0, +/* 1194 - */ 0, +/* 1195 - _66_0F_38_35 */ 0x24ab, +/* 1196 - */ 0, +/* 1197 - */ 0, +/* 1198 - */ 0, +/* 1199 - _V_66_0F_38_35 */ 0x4107, +/* 119a - */ 0, +/* 119b - */ 0, +/* 119c - */ 0, +/* 119d - */ 0, +/* 119e - */ 0, +/* 119f - */ 0, +/* 11a0 - */ 0, +/* 11a1 - _66_0F_38_37 */ 0x24ac, +/* 11a2 - */ 0, +/* 11a3 - */ 0, +/* 11a4 - */ 0, +/* 11a5 - _V_66_0F_38_37 */ 0x4108, +/* 11a6 - */ 0, +/* 11a7 - */ 0, +/* 11a8 - */ 0, +/* 11a9 - */ 0, +/* 11aa - */ 0, +/* 11ab - */ 0, +/* 11ac - */ 0, +/* 11ad - _66_0F_38_38 */ 0x24ad, +/* 11ae - */ 0, +/* 11af - */ 0, +/* 11b0 - */ 0, +/* 11b1 - _V_66_0F_38_38 */ 0x4109, +/* 11b2 - */ 0, +/* 11b3 - */ 0, +/* 11b4 - */ 0, +/* 11b5 - */ 0, +/* 11b6 - */ 0, +/* 11b7 - */ 0, +/* 11b8 - */ 0, +/* 11b9 - _66_0F_38_39 */ 0x24ae, +/* 11ba - */ 0, +/* 11bb - */ 0, +/* 
11bc - */ 0, +/* 11bd - _V_66_0F_38_39 */ 0x410a, +/* 11be - */ 0, +/* 11bf - */ 0, +/* 11c0 - */ 0, +/* 11c1 - */ 0, +/* 11c2 - */ 0, +/* 11c3 - */ 0, +/* 11c4 - */ 0, +/* 11c5 - _66_0F_38_3A */ 0x24af, +/* 11c6 - */ 0, +/* 11c7 - */ 0, +/* 11c8 - */ 0, +/* 11c9 - _V_66_0F_38_3A */ 0x410b, +/* 11ca - */ 0, +/* 11cb - */ 0, +/* 11cc - */ 0, +/* 11cd - */ 0, +/* 11ce - */ 0, +/* 11cf - */ 0, +/* 11d0 - */ 0, +/* 11d1 - _66_0F_38_3B */ 0x24b0, +/* 11d2 - */ 0, +/* 11d3 - */ 0, +/* 11d4 - */ 0, +/* 11d5 - _V_66_0F_38_3B */ 0x410c, +/* 11d6 - */ 0, +/* 11d7 - */ 0, +/* 11d8 - */ 0, +/* 11d9 - */ 0, +/* 11da - */ 0, +/* 11db - */ 0, +/* 11dc - */ 0, +/* 11dd - _66_0F_38_3C */ 0x24b1, +/* 11de - */ 0, +/* 11df - */ 0, +/* 11e0 - */ 0, +/* 11e1 - _V_66_0F_38_3C */ 0x410d, +/* 11e2 - */ 0, +/* 11e3 - */ 0, +/* 11e4 - */ 0, +/* 11e5 - */ 0, +/* 11e6 - */ 0, +/* 11e7 - */ 0, +/* 11e8 - */ 0, +/* 11e9 - _66_0F_38_3D */ 0x24b2, +/* 11ea - */ 0, +/* 11eb - */ 0, +/* 11ec - */ 0, +/* 11ed - _V_66_0F_38_3D */ 0x410e, +/* 11ee - */ 0, +/* 11ef - */ 0, +/* 11f0 - */ 0, +/* 11f1 - */ 0, +/* 11f2 - */ 0, +/* 11f3 - */ 0, +/* 11f4 - */ 0, +/* 11f5 - _66_0F_38_3E */ 0x24b3, +/* 11f6 - */ 0, +/* 11f7 - */ 0, +/* 11f8 - */ 0, +/* 11f9 - _V_66_0F_38_3E */ 0x410f, +/* 11fa - */ 0, +/* 11fb - */ 0, +/* 11fc - */ 0, +/* 11fd - */ 0, +/* 11fe - */ 0, +/* 11ff - */ 0, +/* 1200 - */ 0, +/* 1201 - _66_0F_38_3F */ 0x24b4, +/* 1202 - */ 0, +/* 1203 - */ 0, +/* 1204 - */ 0, +/* 1205 - _V_66_0F_38_3F */ 0x4110, +/* 1206 - */ 0, +/* 1207 - */ 0, +/* 1208 - */ 0, +/* 1209 - */ 0, +/* 120a - */ 0, +/* 120b - */ 0, +/* 120c - */ 0, +/* 120d - _66_0F_38_40 */ 0x24b5, +/* 120e - */ 0, +/* 120f - */ 0, +/* 1210 - */ 0, +/* 1211 - _V_66_0F_38_40 */ 0x4111, +/* 1212 - */ 0, +/* 1213 - */ 0, +/* 1214 - */ 0, +/* 1215 - */ 0, +/* 1216 - */ 0, +/* 1217 - */ 0, +/* 1218 - */ 0, +/* 1219 - _66_0F_38_41 */ 0x24b6, +/* 121a - */ 0, +/* 121b - */ 0, +/* 121c - */ 0, +/* 121d - _V_66_0F_38_41 */ 0x4112, +/* 121e - */ 
0, +/* 121f - */ 0, +/* 1220 - */ 0, +/* 1221 - */ 0, +/* 1222 - */ 0, +/* 1223 - */ 0, +/* 1224 - */ 0, +/* 1225 - _66_0F_38_80 */ 0x24b7, +/* 1226 - */ 0, +/* 1227 - */ 0, +/* 1228 - */ 0, +/* 1229 - */ 0, +/* 122a - */ 0, +/* 122b - */ 0, +/* 122c - */ 0, +/* 122d - */ 0, +/* 122e - */ 0, +/* 122f - */ 0, +/* 1230 - */ 0, +/* 1231 - _66_0F_38_81 */ 0x24b8, +/* 1232 - */ 0, +/* 1233 - */ 0, +/* 1234 - */ 0, +/* 1235 - */ 0, +/* 1236 - */ 0, +/* 1237 - */ 0, +/* 1238 - */ 0, +/* 1239 - */ 0, +/* 123a - */ 0, +/* 123b - */ 0, +/* 123c - */ 0, +/* 123d - _66_0F_38_82 */ 0x24b9, +/* 123e - */ 0, +/* 123f - */ 0, +/* 1240 - */ 0, +/* 1241 - */ 0, +/* 1242 - */ 0, +/* 1243 - */ 0, +/* 1244 - */ 0, +/* 1245 - */ 0, +/* 1246 - */ 0, +/* 1247 - */ 0, +/* 1248 - */ 0, +/* 1249 - */ 0, +/* 124a - */ 0, +/* 124b - */ 0, +/* 124c - */ 0, +/* 124d - _V_66_0F_38_96 */ 0x4113, +/* 124e - */ 0, +/* 124f - */ 0, +/* 1250 - */ 0, +/* 1251 - */ 0, +/* 1252 - */ 0, +/* 1253 - */ 0, +/* 1254 - */ 0, +/* 1255 - */ 0, +/* 1256 - */ 0, +/* 1257 - */ 0, +/* 1258 - */ 0, +/* 1259 - _V_66_0F_38_97 */ 0x4114, +/* 125a - */ 0, +/* 125b - */ 0, +/* 125c - */ 0, +/* 125d - */ 0, +/* 125e - */ 0, +/* 125f - */ 0, +/* 1260 - */ 0, +/* 1261 - */ 0, +/* 1262 - */ 0, +/* 1263 - */ 0, +/* 1264 - */ 0, +/* 1265 - _V_66_0F_38_98 */ 0x4115, +/* 1266 - */ 0, +/* 1267 - */ 0, +/* 1268 - */ 0, +/* 1269 - */ 0, +/* 126a - */ 0, +/* 126b - */ 0, +/* 126c - */ 0, +/* 126d - */ 0, +/* 126e - */ 0, +/* 126f - */ 0, +/* 1270 - */ 0, +/* 1271 - _V_66_0F_38_99 */ 0x4116, +/* 1272 - */ 0, +/* 1273 - */ 0, +/* 1274 - */ 0, +/* 1275 - */ 0, +/* 1276 - */ 0, +/* 1277 - */ 0, +/* 1278 - */ 0, +/* 1279 - */ 0, +/* 127a - */ 0, +/* 127b - */ 0, +/* 127c - */ 0, +/* 127d - _V_66_0F_38_9A */ 0x4117, +/* 127e - */ 0, +/* 127f - */ 0, +/* 1280 - */ 0, +/* 1281 - */ 0, +/* 1282 - */ 0, +/* 1283 - */ 0, +/* 1284 - */ 0, +/* 1285 - */ 0, +/* 1286 - */ 0, +/* 1287 - */ 0, +/* 1288 - */ 0, +/* 1289 - _V_66_0F_38_9B */ 0x4118, +/* 
128a - */ 0, +/* 128b - */ 0, +/* 128c - */ 0, +/* 128d - */ 0, +/* 128e - */ 0, +/* 128f - */ 0, +/* 1290 - */ 0, +/* 1291 - */ 0, +/* 1292 - */ 0, +/* 1293 - */ 0, +/* 1294 - */ 0, +/* 1295 - _V_66_0F_38_9C */ 0x4119, +/* 1296 - */ 0, +/* 1297 - */ 0, +/* 1298 - */ 0, +/* 1299 - */ 0, +/* 129a - */ 0, +/* 129b - */ 0, +/* 129c - */ 0, +/* 129d - */ 0, +/* 129e - */ 0, +/* 129f - */ 0, +/* 12a0 - */ 0, +/* 12a1 - _V_66_0F_38_9D */ 0x411a, +/* 12a2 - */ 0, +/* 12a3 - */ 0, +/* 12a4 - */ 0, +/* 12a5 - */ 0, +/* 12a6 - */ 0, +/* 12a7 - */ 0, +/* 12a8 - */ 0, +/* 12a9 - */ 0, +/* 12aa - */ 0, +/* 12ab - */ 0, +/* 12ac - */ 0, +/* 12ad - _V_66_0F_38_9E */ 0x411b, +/* 12ae - */ 0, +/* 12af - */ 0, +/* 12b0 - */ 0, +/* 12b1 - */ 0, +/* 12b2 - */ 0, +/* 12b3 - */ 0, +/* 12b4 - */ 0, +/* 12b5 - */ 0, +/* 12b6 - */ 0, +/* 12b7 - */ 0, +/* 12b8 - */ 0, +/* 12b9 - _V_66_0F_38_9F */ 0x411c, +/* 12ba - */ 0, +/* 12bb - */ 0, +/* 12bc - */ 0, +/* 12bd - */ 0, +/* 12be - */ 0, +/* 12bf - */ 0, +/* 12c0 - */ 0, +/* 12c1 - */ 0, +/* 12c2 - */ 0, +/* 12c3 - */ 0, +/* 12c4 - */ 0, +/* 12c5 - _V_66_0F_38_A6 */ 0x411d, +/* 12c6 - */ 0, +/* 12c7 - */ 0, +/* 12c8 - */ 0, +/* 12c9 - */ 0, +/* 12ca - */ 0, +/* 12cb - */ 0, +/* 12cc - */ 0, +/* 12cd - */ 0, +/* 12ce - */ 0, +/* 12cf - */ 0, +/* 12d0 - */ 0, +/* 12d1 - _V_66_0F_38_A7 */ 0x411e, +/* 12d2 - */ 0, +/* 12d3 - */ 0, +/* 12d4 - */ 0, +/* 12d5 - */ 0, +/* 12d6 - */ 0, +/* 12d7 - */ 0, +/* 12d8 - */ 0, +/* 12d9 - */ 0, +/* 12da - */ 0, +/* 12db - */ 0, +/* 12dc - */ 0, +/* 12dd - _V_66_0F_38_A8 */ 0x411f, +/* 12de - */ 0, +/* 12df - */ 0, +/* 12e0 - */ 0, +/* 12e1 - */ 0, +/* 12e2 - */ 0, +/* 12e3 - */ 0, +/* 12e4 - */ 0, +/* 12e5 - */ 0, +/* 12e6 - */ 0, +/* 12e7 - */ 0, +/* 12e8 - */ 0, +/* 12e9 - _V_66_0F_38_A9 */ 0x4120, +/* 12ea - */ 0, +/* 12eb - */ 0, +/* 12ec - */ 0, +/* 12ed - */ 0, +/* 12ee - */ 0, +/* 12ef - */ 0, +/* 12f0 - */ 0, +/* 12f1 - */ 0, +/* 12f2 - */ 0, +/* 12f3 - */ 0, +/* 12f4 - */ 0, +/* 12f5 - 
_V_66_0F_38_AA */ 0x4121, +/* 12f6 - */ 0, +/* 12f7 - */ 0, +/* 12f8 - */ 0, +/* 12f9 - */ 0, +/* 12fa - */ 0, +/* 12fb - */ 0, +/* 12fc - */ 0, +/* 12fd - */ 0, +/* 12fe - */ 0, +/* 12ff - */ 0, +/* 1300 - */ 0, +/* 1301 - _V_66_0F_38_AB */ 0x4122, +/* 1302 - */ 0, +/* 1303 - */ 0, +/* 1304 - */ 0, +/* 1305 - */ 0, +/* 1306 - */ 0, +/* 1307 - */ 0, +/* 1308 - */ 0, +/* 1309 - */ 0, +/* 130a - */ 0, +/* 130b - */ 0, +/* 130c - */ 0, +/* 130d - _V_66_0F_38_AC */ 0x4123, +/* 130e - */ 0, +/* 130f - */ 0, +/* 1310 - */ 0, +/* 1311 - */ 0, +/* 1312 - */ 0, +/* 1313 - */ 0, +/* 1314 - */ 0, +/* 1315 - */ 0, +/* 1316 - */ 0, +/* 1317 - */ 0, +/* 1318 - */ 0, +/* 1319 - _V_66_0F_38_AD */ 0x4124, +/* 131a - */ 0, +/* 131b - */ 0, +/* 131c - */ 0, +/* 131d - */ 0, +/* 131e - */ 0, +/* 131f - */ 0, +/* 1320 - */ 0, +/* 1321 - */ 0, +/* 1322 - */ 0, +/* 1323 - */ 0, +/* 1324 - */ 0, +/* 1325 - _V_66_0F_38_AE */ 0x4125, +/* 1326 - */ 0, +/* 1327 - */ 0, +/* 1328 - */ 0, +/* 1329 - */ 0, +/* 132a - */ 0, +/* 132b - */ 0, +/* 132c - */ 0, +/* 132d - */ 0, +/* 132e - */ 0, +/* 132f - */ 0, +/* 1330 - */ 0, +/* 1331 - _V_66_0F_38_AF */ 0x4126, +/* 1332 - */ 0, +/* 1333 - */ 0, +/* 1334 - */ 0, +/* 1335 - */ 0, +/* 1336 - */ 0, +/* 1337 - */ 0, +/* 1338 - */ 0, +/* 1339 - */ 0, +/* 133a - */ 0, +/* 133b - */ 0, +/* 133c - */ 0, +/* 133d - _V_66_0F_38_B6 */ 0x4127, +/* 133e - */ 0, +/* 133f - */ 0, +/* 1340 - */ 0, +/* 1341 - */ 0, +/* 1342 - */ 0, +/* 1343 - */ 0, +/* 1344 - */ 0, +/* 1345 - */ 0, +/* 1346 - */ 0, +/* 1347 - */ 0, +/* 1348 - */ 0, +/* 1349 - _V_66_0F_38_B7 */ 0x4128, +/* 134a - */ 0, +/* 134b - */ 0, +/* 134c - */ 0, +/* 134d - */ 0, +/* 134e - */ 0, +/* 134f - */ 0, +/* 1350 - */ 0, +/* 1351 - */ 0, +/* 1352 - */ 0, +/* 1353 - */ 0, +/* 1354 - */ 0, +/* 1355 - _V_66_0F_38_B8 */ 0x4129, +/* 1356 - */ 0, +/* 1357 - */ 0, +/* 1358 - */ 0, +/* 1359 - */ 0, +/* 135a - */ 0, +/* 135b - */ 0, +/* 135c - */ 0, +/* 135d - */ 0, +/* 135e - */ 0, +/* 135f - */ 0, +/* 1360 - 
*/ 0, +/* 1361 - _V_66_0F_38_B9 */ 0x412a, +/* 1362 - */ 0, +/* 1363 - */ 0, +/* 1364 - */ 0, +/* 1365 - */ 0, +/* 1366 - */ 0, +/* 1367 - */ 0, +/* 1368 - */ 0, +/* 1369 - */ 0, +/* 136a - */ 0, +/* 136b - */ 0, +/* 136c - */ 0, +/* 136d - _V_66_0F_38_BA */ 0x412b, +/* 136e - */ 0, +/* 136f - */ 0, +/* 1370 - */ 0, +/* 1371 - */ 0, +/* 1372 - */ 0, +/* 1373 - */ 0, +/* 1374 - */ 0, +/* 1375 - */ 0, +/* 1376 - */ 0, +/* 1377 - */ 0, +/* 1378 - */ 0, +/* 1379 - _V_66_0F_38_BB */ 0x412c, +/* 137a - */ 0, +/* 137b - */ 0, +/* 137c - */ 0, +/* 137d - */ 0, +/* 137e - */ 0, +/* 137f - */ 0, +/* 1380 - */ 0, +/* 1381 - */ 0, +/* 1382 - */ 0, +/* 1383 - */ 0, +/* 1384 - */ 0, +/* 1385 - _V_66_0F_38_BC */ 0x412d, +/* 1386 - */ 0, +/* 1387 - */ 0, +/* 1388 - */ 0, +/* 1389 - */ 0, +/* 138a - */ 0, +/* 138b - */ 0, +/* 138c - */ 0, +/* 138d - */ 0, +/* 138e - */ 0, +/* 138f - */ 0, +/* 1390 - */ 0, +/* 1391 - _V_66_0F_38_BD */ 0x412e, +/* 1392 - */ 0, +/* 1393 - */ 0, +/* 1394 - */ 0, +/* 1395 - */ 0, +/* 1396 - */ 0, +/* 1397 - */ 0, +/* 1398 - */ 0, +/* 1399 - */ 0, +/* 139a - */ 0, +/* 139b - */ 0, +/* 139c - */ 0, +/* 139d - _V_66_0F_38_BE */ 0x412f, +/* 139e - */ 0, +/* 139f - */ 0, +/* 13a0 - */ 0, +/* 13a1 - */ 0, +/* 13a2 - */ 0, +/* 13a3 - */ 0, +/* 13a4 - */ 0, +/* 13a5 - */ 0, +/* 13a6 - */ 0, +/* 13a7 - */ 0, +/* 13a8 - */ 0, +/* 13a9 - _V_66_0F_38_BF */ 0x4130, +/* 13aa - */ 0, +/* 13ab - */ 0, +/* 13ac - */ 0, +/* 13ad - */ 0, +/* 13ae - */ 0, +/* 13af - */ 0, +/* 13b0 - */ 0, +/* 13b1 - _66_0F_38_DB */ 0x24ba, +/* 13b2 - */ 0, +/* 13b3 - */ 0, +/* 13b4 - */ 0, +/* 13b5 - _V_66_0F_38_DB */ 0x4131, +/* 13b6 - */ 0, +/* 13b7 - */ 0, +/* 13b8 - */ 0, +/* 13b9 - */ 0, +/* 13ba - */ 0, +/* 13bb - */ 0, +/* 13bc - */ 0, +/* 13bd - _66_0F_38_DC */ 0x24bb, +/* 13be - */ 0, +/* 13bf - */ 0, +/* 13c0 - */ 0, +/* 13c1 - _V_66_0F_38_DC */ 0x4132, +/* 13c2 - */ 0, +/* 13c3 - */ 0, +/* 13c4 - */ 0, +/* 13c5 - */ 0, +/* 13c6 - */ 0, +/* 13c7 - */ 0, +/* 13c8 - */ 0, +/* 13c9 
- _66_0F_38_DD */ 0x24bc, +/* 13ca - */ 0, +/* 13cb - */ 0, +/* 13cc - */ 0, +/* 13cd - _V_66_0F_38_DD */ 0x4133, +/* 13ce - */ 0, +/* 13cf - */ 0, +/* 13d0 - */ 0, +/* 13d1 - */ 0, +/* 13d2 - */ 0, +/* 13d3 - */ 0, +/* 13d4 - */ 0, +/* 13d5 - _66_0F_38_DE */ 0x24bd, +/* 13d6 - */ 0, +/* 13d7 - */ 0, +/* 13d8 - */ 0, +/* 13d9 - _V_66_0F_38_DE */ 0x4134, +/* 13da - */ 0, +/* 13db - */ 0, +/* 13dc - */ 0, +/* 13dd - */ 0, +/* 13de - */ 0, +/* 13df - */ 0, +/* 13e0 - */ 0, +/* 13e1 - _66_0F_38_DF */ 0x24be, +/* 13e2 - */ 0, +/* 13e3 - */ 0, +/* 13e4 - */ 0, +/* 13e5 - _V_66_0F_38_DF */ 0x4135, +/* 13e6 - */ 0, +/* 13e7 - */ 0, +/* 13e8 - */ 0, +/* 13e9 - */ 0, +/* 13ea - */ 0, +/* 13eb - */ 0, +/* 13ec - _0F_38_F0 */ 0x24bf, +/* 13ed - */ 0, +/* 13ee - */ 0, +/* 13ef - _F2_0F_38_F0 */ 0x24c0, +/* 13f0 - */ 0, +/* 13f1 - */ 0, +/* 13f2 - */ 0, +/* 13f3 - */ 0, +/* 13f4 - */ 0, +/* 13f5 - */ 0, +/* 13f6 - */ 0, +/* 13f7 - */ 0, +/* 13f8 - _0F_38_F1 */ 0x24c1, +/* 13f9 - */ 0, +/* 13fa - */ 0, +/* 13fb - _F2_0F_38_F1 */ 0x24c2, +/* 13fc - */ 0, +/* 13fd - */ 0, +/* 13fe - */ 0, +/* 13ff - */ 0, +/* 1400 - */ 0, +/* 1401 - */ 0, +/* 1402 - */ 0, +/* 1403 - */ 0, +/* 1404 - */ 0, +/* 1405 - */ 0, +/* 1406 - */ 0, +/* 1407 - */ 0, +/* 1408 - */ 0, +/* 1409 - _V_66_0F_3A_04 */ 0x4136, +/* 140a - */ 0, +/* 140b - */ 0, +/* 140c - */ 0, +/* 140d - */ 0, +/* 140e - */ 0, +/* 140f - */ 0, +/* 1410 - */ 0, +/* 1411 - */ 0, +/* 1412 - */ 0, +/* 1413 - */ 0, +/* 1414 - */ 0, +/* 1415 - _V_66_0F_3A_05 */ 0x4137, +/* 1416 - */ 0, +/* 1417 - */ 0, +/* 1418 - */ 0, +/* 1419 - */ 0, +/* 141a - */ 0, +/* 141b - */ 0, +/* 141c - */ 0, +/* 141d - */ 0, +/* 141e - */ 0, +/* 141f - */ 0, +/* 1420 - */ 0, +/* 1421 - _V_66_0F_3A_06 */ 0x4138, +/* 1422 - */ 0, +/* 1423 - */ 0, +/* 1424 - */ 0, +/* 1425 - */ 0, +/* 1426 - */ 0, +/* 1427 - */ 0, +/* 1428 - */ 0, +/* 1429 - _66_0F_3A_08 */ 0x4139, +/* 142a - */ 0, +/* 142b - */ 0, +/* 142c - */ 0, +/* 142d - _V_66_0F_3A_08 */ 0x413a, +/* 142e - */ 
0, +/* 142f - */ 0, +/* 1430 - */ 0, +/* 1431 - */ 0, +/* 1432 - */ 0, +/* 1433 - */ 0, +/* 1434 - */ 0, +/* 1435 - _66_0F_3A_09 */ 0x413b, +/* 1436 - */ 0, +/* 1437 - */ 0, +/* 1438 - */ 0, +/* 1439 - _V_66_0F_3A_09 */ 0x413c, +/* 143a - */ 0, +/* 143b - */ 0, +/* 143c - */ 0, +/* 143d - */ 0, +/* 143e - */ 0, +/* 143f - */ 0, +/* 1440 - */ 0, +/* 1441 - _66_0F_3A_0A */ 0x413d, +/* 1442 - */ 0, +/* 1443 - */ 0, +/* 1444 - */ 0, +/* 1445 - _V_66_0F_3A_0A */ 0x413e, +/* 1446 - */ 0, +/* 1447 - */ 0, +/* 1448 - */ 0, +/* 1449 - */ 0, +/* 144a - */ 0, +/* 144b - */ 0, +/* 144c - */ 0, +/* 144d - _66_0F_3A_0B */ 0x413f, +/* 144e - */ 0, +/* 144f - */ 0, +/* 1450 - */ 0, +/* 1451 - _V_66_0F_3A_0B */ 0x4140, +/* 1452 - */ 0, +/* 1453 - */ 0, +/* 1454 - */ 0, +/* 1455 - */ 0, +/* 1456 - */ 0, +/* 1457 - */ 0, +/* 1458 - */ 0, +/* 1459 - _66_0F_3A_0C */ 0x4141, +/* 145a - */ 0, +/* 145b - */ 0, +/* 145c - */ 0, +/* 145d - _V_66_0F_3A_0C */ 0x4142, +/* 145e - */ 0, +/* 145f - */ 0, +/* 1460 - */ 0, +/* 1461 - */ 0, +/* 1462 - */ 0, +/* 1463 - */ 0, +/* 1464 - */ 0, +/* 1465 - _66_0F_3A_0D */ 0x4143, +/* 1466 - */ 0, +/* 1467 - */ 0, +/* 1468 - */ 0, +/* 1469 - _V_66_0F_3A_0D */ 0x4144, +/* 146a - */ 0, +/* 146b - */ 0, +/* 146c - */ 0, +/* 146d - */ 0, +/* 146e - */ 0, +/* 146f - */ 0, +/* 1470 - */ 0, +/* 1471 - _66_0F_3A_0E */ 0x4145, +/* 1472 - */ 0, +/* 1473 - */ 0, +/* 1474 - */ 0, +/* 1475 - _V_66_0F_3A_0E */ 0x4146, +/* 1476 - */ 0, +/* 1477 - */ 0, +/* 1478 - */ 0, +/* 1479 - */ 0, +/* 147a - */ 0, +/* 147b - */ 0, +/* 147c - _0F_3A_0F */ 0x4147, +/* 147d - _66_0F_3A_0F */ 0x4148, +/* 147e - */ 0, +/* 147f - */ 0, +/* 1480 - */ 0, +/* 1481 - _V_66_0F_3A_0F */ 0x4149, +/* 1482 - */ 0, +/* 1483 - */ 0, +/* 1484 - */ 0, +/* 1485 - */ 0, +/* 1486 - */ 0, +/* 1487 - */ 0, +/* 1488 - */ 0, +/* 1489 - _66_0F_3A_14 */ 0x414a, +/* 148a - */ 0, +/* 148b - */ 0, +/* 148c - */ 0, +/* 148d - _V_66_0F_3A_14 */ 0x414b, +/* 148e - */ 0, +/* 148f - */ 0, +/* 1490 - */ 0, +/* 1491 - 
*/ 0, +/* 1492 - */ 0, +/* 1493 - */ 0, +/* 1494 - */ 0, +/* 1495 - _66_0F_3A_15 */ 0x414c, +/* 1496 - */ 0, +/* 1497 - */ 0, +/* 1498 - */ 0, +/* 1499 - _V_66_0F_3A_15 */ 0x414d, +/* 149a - */ 0, +/* 149b - */ 0, +/* 149c - */ 0, +/* 149d - */ 0, +/* 149e - */ 0, +/* 149f - */ 0, +/* 14a0 - */ 0, +/* 14a1 - _66_0F_3A_16 */ 0x414e, +/* 14a2 - */ 0, +/* 14a3 - */ 0, +/* 14a4 - */ 0, +/* 14a5 - _V_66_0F_3A_16 */ 0x414f, +/* 14a6 - */ 0, +/* 14a7 - */ 0, +/* 14a8 - */ 0, +/* 14a9 - */ 0, +/* 14aa - */ 0, +/* 14ab - */ 0, +/* 14ac - */ 0, +/* 14ad - _66_0F_3A_17 */ 0x4150, +/* 14ae - */ 0, +/* 14af - */ 0, +/* 14b0 - */ 0, +/* 14b1 - _V_66_0F_3A_17 */ 0x4151, +/* 14b2 - */ 0, +/* 14b3 - */ 0, +/* 14b4 - */ 0, +/* 14b5 - */ 0, +/* 14b6 - */ 0, +/* 14b7 - */ 0, +/* 14b8 - */ 0, +/* 14b9 - */ 0, +/* 14ba - */ 0, +/* 14bb - */ 0, +/* 14bc - */ 0, +/* 14bd - _V_66_0F_3A_18 */ 0x4152, +/* 14be - */ 0, +/* 14bf - */ 0, +/* 14c0 - */ 0, +/* 14c1 - */ 0, +/* 14c2 - */ 0, +/* 14c3 - */ 0, +/* 14c4 - */ 0, +/* 14c5 - */ 0, +/* 14c6 - */ 0, +/* 14c7 - */ 0, +/* 14c8 - */ 0, +/* 14c9 - _V_66_0F_3A_19 */ 0x4153, +/* 14ca - */ 0, +/* 14cb - */ 0, +/* 14cc - */ 0, +/* 14cd - */ 0, +/* 14ce - */ 0, +/* 14cf - */ 0, +/* 14d0 - */ 0, +/* 14d1 - _66_0F_3A_20 */ 0x4154, +/* 14d2 - */ 0, +/* 14d3 - */ 0, +/* 14d4 - */ 0, +/* 14d5 - _V_66_0F_3A_20 */ 0x4155, +/* 14d6 - */ 0, +/* 14d7 - */ 0, +/* 14d8 - */ 0, +/* 14d9 - */ 0, +/* 14da - */ 0, +/* 14db - */ 0, +/* 14dc - */ 0, +/* 14dd - _66_0F_3A_21 */ 0x4156, +/* 14de - */ 0, +/* 14df - */ 0, +/* 14e0 - */ 0, +/* 14e1 - _V_66_0F_3A_21 */ 0x4157, +/* 14e2 - */ 0, +/* 14e3 - */ 0, +/* 14e4 - */ 0, +/* 14e5 - */ 0, +/* 14e6 - */ 0, +/* 14e7 - */ 0, +/* 14e8 - */ 0, +/* 14e9 - _66_0F_3A_22 */ 0x4158, +/* 14ea - */ 0, +/* 14eb - */ 0, +/* 14ec - */ 0, +/* 14ed - _V_66_0F_3A_22 */ 0x4159, +/* 14ee - */ 0, +/* 14ef - */ 0, +/* 14f0 - */ 0, +/* 14f1 - */ 0, +/* 14f2 - */ 0, +/* 14f3 - */ 0, +/* 14f4 - */ 0, +/* 14f5 - _66_0F_3A_40 */ 0x415a, +/* 
14f6 - */ 0, +/* 14f7 - */ 0, +/* 14f8 - */ 0, +/* 14f9 - _V_66_0F_3A_40 */ 0x415b, +/* 14fa - */ 0, +/* 14fb - */ 0, +/* 14fc - */ 0, +/* 14fd - */ 0, +/* 14fe - */ 0, +/* 14ff - */ 0, +/* 1500 - */ 0, +/* 1501 - _66_0F_3A_41 */ 0x415c, +/* 1502 - */ 0, +/* 1503 - */ 0, +/* 1504 - */ 0, +/* 1505 - _V_66_0F_3A_41 */ 0x415d, +/* 1506 - */ 0, +/* 1507 - */ 0, +/* 1508 - */ 0, +/* 1509 - */ 0, +/* 150a - */ 0, +/* 150b - */ 0, +/* 150c - */ 0, +/* 150d - _66_0F_3A_42 */ 0x415e, +/* 150e - */ 0, +/* 150f - */ 0, +/* 1510 - */ 0, +/* 1511 - _V_66_0F_3A_42 */ 0x415f, +/* 1512 - */ 0, +/* 1513 - */ 0, +/* 1514 - */ 0, +/* 1515 - */ 0, +/* 1516 - */ 0, +/* 1517 - */ 0, +/* 1518 - */ 0, +/* 1519 - _66_0F_3A_44 */ 0x4160, +/* 151a - */ 0, +/* 151b - */ 0, +/* 151c - */ 0, +/* 151d - _V_66_0F_3A_44 */ 0x4161, +/* 151e - */ 0, +/* 151f - */ 0, +/* 1520 - */ 0, +/* 1521 - */ 0, +/* 1522 - */ 0, +/* 1523 - */ 0, +/* 1524 - */ 0, +/* 1525 - */ 0, +/* 1526 - */ 0, +/* 1527 - */ 0, +/* 1528 - */ 0, +/* 1529 - _V_66_0F_3A_4A */ 0x4162, +/* 152a - */ 0, +/* 152b - */ 0, +/* 152c - */ 0, +/* 152d - */ 0, +/* 152e - */ 0, +/* 152f - */ 0, +/* 1530 - */ 0, +/* 1531 - */ 0, +/* 1532 - */ 0, +/* 1533 - */ 0, +/* 1534 - */ 0, +/* 1535 - _V_66_0F_3A_4B */ 0x4163, +/* 1536 - */ 0, +/* 1537 - */ 0, +/* 1538 - */ 0, +/* 1539 - */ 0, +/* 153a - */ 0, +/* 153b - */ 0, +/* 153c - */ 0, +/* 153d - */ 0, +/* 153e - */ 0, +/* 153f - */ 0, +/* 1540 - */ 0, +/* 1541 - _V_66_0F_3A_4C */ 0x4164, +/* 1542 - */ 0, +/* 1543 - */ 0, +/* 1544 - */ 0, +/* 1545 - */ 0, +/* 1546 - */ 0, +/* 1547 - */ 0, +/* 1548 - */ 0, +/* 1549 - _66_0F_3A_60 */ 0x4165, +/* 154a - */ 0, +/* 154b - */ 0, +/* 154c - */ 0, +/* 154d - _V_66_0F_3A_60 */ 0x4166, +/* 154e - */ 0, +/* 154f - */ 0, +/* 1550 - */ 0, +/* 1551 - */ 0, +/* 1552 - */ 0, +/* 1553 - */ 0, +/* 1554 - */ 0, +/* 1555 - _66_0F_3A_61 */ 0x4167, +/* 1556 - */ 0, +/* 1557 - */ 0, +/* 1558 - */ 0, +/* 1559 - _V_66_0F_3A_61 */ 0x4168, +/* 155a - */ 0, +/* 155b - */ 0, 
+/* 155c - */ 0, +/* 155d - */ 0, +/* 155e - */ 0, +/* 155f - */ 0, +/* 1560 - */ 0, +/* 1561 - _66_0F_3A_62 */ 0x4169, +/* 1562 - */ 0, +/* 1563 - */ 0, +/* 1564 - */ 0, +/* 1565 - _V_66_0F_3A_62 */ 0x416a, +/* 1566 - */ 0, +/* 1567 - */ 0, +/* 1568 - */ 0, +/* 1569 - */ 0, +/* 156a - */ 0, +/* 156b - */ 0, +/* 156c - */ 0, +/* 156d - _66_0F_3A_63 */ 0x416b, +/* 156e - */ 0, +/* 156f - */ 0, +/* 1570 - */ 0, +/* 1571 - _V_66_0F_3A_63 */ 0x416c, +/* 1572 - */ 0, +/* 1573 - */ 0, +/* 1574 - */ 0, +/* 1575 - */ 0, +/* 1576 - */ 0, +/* 1577 - */ 0, +/* 1578 - */ 0, +/* 1579 - _66_0F_3A_DF */ 0x416d, +/* 157a - */ 0, +/* 157b - */ 0, +/* 157c - */ 0, +/* 157d - _V_66_0F_3A_DF */ 0x416e, +/* 157e - */ 0, +/* 157f - */ 0, +/* 1580 - */ 0, +/* 1581 - */ 0, +/* 1582 - */ 0, +/* 1583 - */ 0, +/* 1584 - _0F_71_02 */ 0x24c3, +/* 1585 - _66_0F_71_02 */ 0x24c4, +/* 1586 - */ 0, +/* 1587 - */ 0, +/* 1588 - */ 0, +/* 1589 - _V_66_0F_71_02 */ 0x416f, +/* 158a - */ 0, +/* 158b - */ 0, +/* 158c - */ 0, +/* 158d - */ 0, +/* 158e - */ 0, +/* 158f - */ 0, +/* 1590 - _0F_71_04 */ 0x24c5, +/* 1591 - _66_0F_71_04 */ 0x24c6, +/* 1592 - */ 0, +/* 1593 - */ 0, +/* 1594 - */ 0, +/* 1595 - _V_66_0F_71_04 */ 0x4170, +/* 1596 - */ 0, +/* 1597 - */ 0, +/* 1598 - */ 0, +/* 1599 - */ 0, +/* 159a - */ 0, +/* 159b - */ 0, +/* 159c - _0F_71_06 */ 0x24c7, +/* 159d - _66_0F_71_06 */ 0x24c8, +/* 159e - */ 0, +/* 159f - */ 0, +/* 15a0 - */ 0, +/* 15a1 - _V_66_0F_71_06 */ 0x4171, +/* 15a2 - */ 0, +/* 15a3 - */ 0, +/* 15a4 - */ 0, +/* 15a5 - */ 0, +/* 15a6 - */ 0, +/* 15a7 - */ 0, +/* 15a8 - _0F_72_02 */ 0x24c9, +/* 15a9 - _66_0F_72_02 */ 0x24ca, +/* 15aa - */ 0, +/* 15ab - */ 0, +/* 15ac - */ 0, +/* 15ad - _V_66_0F_72_02 */ 0x4172, +/* 15ae - */ 0, +/* 15af - */ 0, +/* 15b0 - */ 0, +/* 15b1 - */ 0, +/* 15b2 - */ 0, +/* 15b3 - */ 0, +/* 15b4 - _0F_72_04 */ 0x24cb, +/* 15b5 - _66_0F_72_04 */ 0x24cc, +/* 15b6 - */ 0, +/* 15b7 - */ 0, +/* 15b8 - */ 0, +/* 15b9 - _V_66_0F_72_04 */ 0x4173, +/* 15ba - */ 0, +/* 
15bb - */ 0, +/* 15bc - */ 0, +/* 15bd - */ 0, +/* 15be - */ 0, +/* 15bf - */ 0, +/* 15c0 - _0F_72_06 */ 0x24cd, +/* 15c1 - _66_0F_72_06 */ 0x24ce, +/* 15c2 - */ 0, +/* 15c3 - */ 0, +/* 15c4 - */ 0, +/* 15c5 - _V_66_0F_72_06 */ 0x4174, +/* 15c6 - */ 0, +/* 15c7 - */ 0, +/* 15c8 - */ 0, +/* 15c9 - */ 0, +/* 15ca - */ 0, +/* 15cb - */ 0, +/* 15cc - _0F_73_02 */ 0x24cf, +/* 15cd - _66_0F_73_02 */ 0x24d0, +/* 15ce - */ 0, +/* 15cf - */ 0, +/* 15d0 - */ 0, +/* 15d1 - _V_66_0F_73_02 */ 0x4175, +/* 15d2 - */ 0, +/* 15d3 - */ 0, +/* 15d4 - */ 0, +/* 15d5 - */ 0, +/* 15d6 - */ 0, +/* 15d7 - */ 0, +/* 15d8 - */ 0, +/* 15d9 - _66_0F_73_03 */ 0x24d1, +/* 15da - */ 0, +/* 15db - */ 0, +/* 15dc - */ 0, +/* 15dd - _V_66_0F_73_03 */ 0x4176, +/* 15de - */ 0, +/* 15df - */ 0, +/* 15e0 - */ 0, +/* 15e1 - */ 0, +/* 15e2 - */ 0, +/* 15e3 - */ 0, +/* 15e4 - _0F_73_06 */ 0x24d2, +/* 15e5 - _66_0F_73_06 */ 0x24d3, +/* 15e6 - */ 0, +/* 15e7 - */ 0, +/* 15e8 - */ 0, +/* 15e9 - _V_66_0F_73_06 */ 0x4177, +/* 15ea - */ 0, +/* 15eb - */ 0, +/* 15ec - */ 0, +/* 15ed - */ 0, +/* 15ee - */ 0, +/* 15ef - */ 0, +/* 15f0 - */ 0, +/* 15f1 - _66_0F_73_07 */ 0x24d4, +/* 15f2 - */ 0, +/* 15f3 - */ 0, +/* 15f4 - */ 0, +/* 15f5 - _V_66_0F_73_07 */ 0x4178, +/* 15f6 - */ 0, +/* 15f7 - */ 0, +/* 15f8 - */ 0, +/* 15f9 - */ 0, +/* 15fa - */ 0, +/* 15fb - */ 0, +/* 15fc - _0F_AE_00 */ 0x4179, +/* 15fd - */ 0, +/* 15fe - _F3_0F_AE_00 */ 0x24d5, +/* 15ff - */ 0, +/* 1600 - */ 0, +/* 1601 - */ 0, +/* 1602 - */ 0, +/* 1603 - */ 0, +/* 1604 - */ 0, +/* 1605 - */ 0, +/* 1606 - */ 0, +/* 1607 - */ 0, +/* 1608 - _0F_AE_01 */ 0x417a, +/* 1609 - */ 0, +/* 160a - _F3_0F_AE_01 */ 0x24d6, +/* 160b - */ 0, +/* 160c - */ 0, +/* 160d - */ 0, +/* 160e - */ 0, +/* 160f - */ 0, +/* 1610 - */ 0, +/* 1611 - */ 0, +/* 1612 - */ 0, +/* 1613 - */ 0, +/* 1614 - _0F_AE_02 */ 0x24d7, +/* 1615 - */ 0, +/* 1616 - _F3_0F_AE_02 */ 0x24d8, +/* 1617 - */ 0, +/* 1618 - _V_0F_AE_02 */ 0x417b, +/* 1619 - */ 0, +/* 161a - */ 0, +/* 161b - */ 0, +/* 
161c - */ 0, +/* 161d - */ 0, +/* 161e - */ 0, +/* 161f - */ 0, +/* 1620 - _0F_AE_03 */ 0x24d9, +/* 1621 - */ 0, +/* 1622 - _F3_0F_AE_03 */ 0x24da, +/* 1623 - */ 0, +/* 1624 - _V_0F_AE_03 */ 0x417c, +/* 1625 - */ 0, +/* 1626 - */ 0, +/* 1627 - */ 0, +/* 1628 - */ 0, +/* 1629 - */ 0, +/* 162a - */ 0, +/* 162b - */ 0, +/* 162c - _0F_C7_06 */ 0x24db, +/* 162d - _66_0F_C7_06 */ 0x24dc, +/* 162e - _F3_0F_C7_06 */ 0x24dd, +/* 162f - */ 0, +/* 1630 - */ 0, +/* 1631 - */ 0, +/* 1632 - */ 0, +/* 1633 - */ 0, +/* 1634 - */ 0, +/* 1635 - */ 0, +/* 1636 - */ 0, +/* 1637 - */ 0 +}; + +_InstSharedInfo InstSharedInfoTable[471] = { +{0, 9, 15, 8, 245, 0, 0}, +{0, 11, 17, 8, 245, 0, 0}, +{0, 15, 9, 8, 245, 0, 0}, +{0, 17, 11, 8, 245, 0, 0}, +{1, 1, 33, 8, 245, 0, 0}, +{1, 3, 35, 8, 245, 0, 0}, +{2, 0, 32, 8, 0, 0, 0}, +{3, 0, 32, 8, 0, 0, 0}, +{0, 9, 15, 8, 196, 16, 0}, +{0, 11, 17, 8, 196, 16, 0}, +{0, 15, 9, 8, 196, 16, 0}, +{0, 17, 11, 8, 196, 16, 0}, +{1, 1, 33, 8, 196, 16, 0}, +{1, 3, 35, 8, 196, 16, 0}, +{4, 0, 32, 8, 0, 0, 0}, +{0, 9, 15, 8, 245, 1, 0}, +{0, 11, 17, 8, 245, 1, 0}, +{0, 15, 9, 8, 245, 1, 0}, +{0, 17, 11, 8, 245, 1, 0}, +{1, 1, 33, 8, 245, 1, 0}, +{1, 3, 35, 8, 245, 1, 0}, +{5, 0, 32, 8, 0, 0, 0}, +{6, 0, 32, 8, 0, 0, 0}, +{7, 0, 32, 8, 0, 0, 0}, +{8, 0, 32, 8, 0, 0, 0}, +{0, 9, 15, 8, 229, 0, 16}, +{0, 11, 17, 8, 229, 0, 16}, +{0, 15, 9, 8, 229, 0, 16}, +{0, 17, 11, 8, 229, 0, 16}, +{1, 1, 33, 8, 229, 0, 16}, +{1, 3, 35, 8, 229, 0, 16}, +{9, 0, 0, 8, 213, 17, 32}, +{0, 9, 15, 8, 196, 0, 16}, +{0, 11, 17, 8, 196, 0, 16}, +{0, 15, 9, 8, 196, 0, 16}, +{0, 17, 11, 8, 196, 0, 16}, +{1, 1, 33, 8, 196, 0, 16}, +{1, 3, 35, 8, 196, 0, 16}, +{9, 0, 0, 8, 17, 16, 228}, +{10, 9, 15, 8, 245, 0, 0}, +{10, 11, 17, 8, 245, 0, 0}, +{10, 15, 9, 8, 245, 0, 0}, +{10, 17, 11, 8, 245, 0, 0}, +{11, 1, 33, 8, 245, 0, 0}, +{11, 3, 35, 8, 245, 0, 0}, +{12, 0, 54, 8, 244, 0, 0}, +{13, 0, 54, 8, 0, 0, 0}, +{14, 0, 54, 8, 0, 0, 0}, +{15, 0, 0, 8, 0, 0, 0}, +{16, 42, 11, 8, 0, 0, 0}, 
+{10, 10, 16, 8, 64, 0, 0}, +{13, 0, 3, 8, 0, 0, 0}, +{17, 17, 11, 8, 33, 0, 212}, +{18, 0, 5, 8, 0, 0, 0}, +{19, 59, 56, 8, 0, 8, 0}, +{20, 59, 56, 8, 0, 8, 0}, +{19, 55, 59, 8, 0, 8, 0}, +{20, 55, 59, 8, 0, 8, 0}, +{13, 0, 40, 13, 0, 32, 0}, +{13, 0, 40, 13, 0, 1, 0}, +{13, 0, 40, 13, 0, 64, 0}, +{13, 0, 40, 13, 0, 65, 0}, +{13, 0, 40, 13, 0, 128, 0}, +{13, 0, 40, 13, 0, 4, 0}, +{13, 0, 40, 13, 0, 160, 0}, +{13, 0, 40, 13, 0, 224, 0}, +{10, 9, 15, 8, 196, 0, 16}, +{10, 11, 17, 8, 196, 0, 16}, +{0, 9, 15, 8, 0, 0, 0}, +{0, 11, 17, 8, 0, 0, 0}, +{21, 9, 15, 8, 0, 0, 0}, +{21, 11, 17, 8, 0, 0, 0}, +{21, 15, 9, 8, 0, 0, 0}, +{21, 17, 11, 8, 0, 0, 0}, +{21, 31, 28, 8, 0, 0, 0}, +{21, 42, 11, 8, 0, 0, 0}, +{21, 28, 31, 8, 0, 0, 0}, +{1, 35, 54, 8, 0, 0, 0}, +{22, 0, 0, 8, 0, 0, 0}, +{9, 0, 38, 9, 0, 0, 0}, +{23, 0, 0, 8, 0, 0, 0}, +{23, 0, 0, 8, 255, 0, 0}, +{11, 0, 0, 8, 213, 0, 0}, +{11, 0, 0, 8, 0, 0, 0}, +{1, 49, 33, 8, 0, 0, 0}, +{1, 50, 35, 8, 0, 0, 0}, +{1, 33, 49, 8, 0, 0, 0}, +{1, 35, 50, 8, 0, 0, 0}, +{24, 55, 56, 8, 0, 8, 0}, +{25, 55, 56, 8, 0, 8, 0}, +{19, 56, 55, 8, 245, 8, 0}, +{26, 56, 55, 8, 245, 8, 0}, +{11, 1, 33, 8, 196, 0, 16}, +{11, 3, 35, 8, 196, 0, 16}, +{19, 33, 56, 8, 0, 8, 0}, +{26, 35, 56, 8, 0, 8, 0}, +{19, 55, 33, 8, 0, 8, 0}, +{26, 55, 35, 8, 0, 8, 0}, +{19, 33, 56, 8, 245, 8, 0}, +{26, 35, 56, 8, 245, 8, 0}, +{1, 1, 53, 8, 0, 0, 0}, +{27, 3, 54, 8, 0, 0, 0}, +{13, 0, 2, 10, 0, 0, 0}, +{13, 0, 0, 10, 0, 0, 0}, +{16, 37, 11, 8, 0, 0, 0}, +{13, 8, 6, 8, 0, 0, 0}, +{13, 0, 0, 8, 0, 0, 0}, +{28, 0, 2, 10, 0, 0, 0}, +{28, 0, 0, 10, 0, 0, 0}, +{11, 0, 0, 14, 0, 0, 0}, +{11, 0, 1, 14, 0, 0, 0}, +{9, 0, 0, 14, 0, 0, 0}, +{28, 0, 0, 10, 255, 0, 0}, +{9, 0, 1, 8, 196, 0, 49}, +{9, 0, 0, 8, 0, 0, 0}, +{29, 0, 57, 8, 0, 0, 0}, +{30, 0, 40, 13, 0, 64, 0}, +{30, 0, 40, 13, 0, 0, 0}, +{31, 0, 40, 13, 0, 0, 0}, +{1, 1, 33, 8, 0, 0, 0}, +{1, 1, 36, 8, 0, 0, 0}, +{11, 33, 1, 8, 0, 0, 0}, +{11, 36, 1, 8, 0, 0, 0}, +{13, 0, 41, 9, 0, 0, 0}, +{13, 0, 41, 12, 
0, 0, 0}, +{9, 0, 38, 12, 0, 0, 0}, +{13, 0, 40, 12, 0, 0, 0}, +{1, 59, 33, 8, 0, 0, 0}, +{1, 59, 36, 8, 0, 0, 0}, +{11, 33, 59, 8, 0, 0, 0}, +{11, 36, 59, 8, 0, 0, 0}, +{11, 0, 0, 8, 1, 0, 0}, +{11, 0, 0, 8, 2, 0, 0}, +{11, 0, 0, 8, 8, 0, 0}, +{10, 16, 11, 8, 64, 0, 0}, +{32, 0, 0, 27, 0, 0, 0}, +{32, 0, 0, 8, 0, 0, 0}, +{32, 0, 0, 14, 0, 0, 0}, +{11, 0, 0, 96, 0, 0, 0}, +{10, 0, 17, 8, 0, 0, 0}, +{33, 29, 14, 8, 0, 0, 0}, +{33, 30, 14, 8, 0, 0, 0}, +{33, 14, 29, 8, 0, 0, 0}, +{33, 14, 30, 8, 0, 0, 0}, +{34, 0, 0, 8, 0, 0, 0}, +{35, 17, 11, 31, 0, 32, 0}, +{35, 17, 11, 31, 0, 1, 0}, +{35, 17, 11, 31, 0, 64, 0}, +{35, 17, 11, 31, 0, 65, 0}, +{35, 17, 11, 31, 0, 128, 0}, +{35, 17, 11, 31, 0, 4, 0}, +{35, 17, 11, 31, 0, 160, 0}, +{35, 17, 11, 31, 0, 224, 0}, +{32, 0, 41, 13, 0, 32, 0}, +{32, 0, 41, 13, 0, 1, 0}, +{32, 0, 41, 13, 0, 64, 0}, +{32, 0, 41, 13, 0, 65, 0}, +{32, 0, 41, 13, 0, 128, 0}, +{32, 0, 41, 13, 0, 4, 0}, +{32, 0, 41, 13, 0, 160, 0}, +{32, 0, 41, 13, 0, 224, 0}, +{35, 0, 15, 8, 0, 32, 0}, +{35, 0, 15, 8, 0, 1, 0}, +{35, 0, 15, 8, 0, 64, 0}, +{35, 0, 15, 8, 0, 65, 0}, +{35, 0, 15, 8, 0, 128, 0}, +{35, 0, 15, 8, 0, 4, 0}, +{35, 0, 15, 8, 0, 160, 0}, +{35, 0, 15, 8, 0, 224, 0}, +{36, 0, 32, 8, 0, 0, 0}, +{37, 0, 32, 8, 0, 0, 0}, +{35, 11, 17, 8, 1, 0, 244}, +{38, 11, 17, 8, 197, 0, 48}, +{39, 0, 32, 8, 0, 0, 0}, +{40, 0, 32, 8, 0, 0, 0}, +{32, 0, 0, 8, 255, 0, 0}, +{41, 11, 17, 8, 1, 0, 244}, +{35, 17, 11, 8, 33, 0, 212}, +{41, 9, 15, 8, 245, 0, 0}, +{41, 11, 17, 8, 245, 0, 0}, +{42, 37, 11, 8, 0, 0, 0}, +{35, 15, 11, 8, 0, 0, 0}, +{43, 16, 11, 8, 0, 0, 0}, +{43, 13, 45, 48, 0, 0, 0}, +{44, 0, 54, 8, 0, 0, 0}, +{45, 1, 15, 8, 245, 0, 0}, +{45, 1, 15, 8, 196, 16, 0}, +{45, 1, 15, 8, 245, 1, 0}, +{45, 1, 15, 8, 229, 0, 16}, +{45, 1, 15, 8, 196, 0, 16}, +{46, 1, 15, 8, 245, 0, 0}, +{45, 3, 17, 8, 245, 0, 0}, +{45, 3, 17, 8, 196, 16, 0}, +{45, 3, 17, 8, 245, 1, 0}, +{45, 3, 17, 8, 229, 0, 16}, +{45, 3, 17, 8, 196, 0, 16}, +{46, 3, 17, 8, 245, 0, 0}, +{47, 
1, 15, 8, 245, 0, 0}, +{47, 1, 15, 8, 196, 16, 0}, +{47, 1, 15, 8, 245, 1, 0}, +{47, 1, 15, 8, 229, 0, 16}, +{47, 1, 15, 8, 196, 0, 16}, +{48, 1, 15, 8, 245, 0, 0}, +{45, 5, 17, 8, 245, 0, 0}, +{49, 5, 17, 8, 196, 16, 0}, +{45, 5, 17, 8, 245, 1, 0}, +{49, 5, 17, 8, 229, 0, 16}, +{49, 5, 17, 8, 196, 0, 16}, +{46, 5, 17, 8, 245, 0, 0}, +{50, 0, 17, 8, 0, 0, 0}, +{51, 1, 15, 8, 1, 0, 32}, +{51, 1, 15, 8, 1, 1, 32}, +{51, 1, 15, 8, 197, 0, 48}, +{51, 1, 17, 8, 1, 0, 32}, +{51, 1, 17, 8, 1, 1, 32}, +{51, 1, 17, 8, 197, 0, 48}, +{52, 1, 15, 8, 0, 0, 0}, +{53, 0, 1, 24, 0, 0, 0}, +{52, 3, 17, 8, 0, 0, 0}, +{53, 0, 41, 24, 0, 0, 0}, +{51, 51, 15, 8, 33, 0, 0}, +{51, 51, 15, 8, 33, 1, 0}, +{51, 51, 15, 8, 229, 0, 16}, +{51, 51, 17, 8, 33, 0, 0}, +{51, 51, 17, 8, 33, 1, 0}, +{51, 51, 17, 8, 229, 0, 16}, +{51, 52, 15, 8, 1, 0, 32}, +{51, 52, 15, 8, 1, 1, 32}, +{51, 52, 15, 8, 197, 0, 48}, +{51, 52, 17, 8, 1, 0, 32}, +{51, 52, 17, 8, 1, 1, 32}, +{51, 52, 17, 8, 197, 0, 48}, +{46, 0, 21, 16, 0, 0, 0}, +{54, 0, 62, 16, 0, 0, 0}, +{54, 0, 61, 16, 0, 0, 0}, +{54, 0, 0, 16, 0, 0, 0}, +{51, 0, 21, 16, 0, 0, 0}, +{46, 0, 42, 16, 0, 0, 0}, +{46, 0, 20, 16, 0, 0, 0}, +{55, 0, 62, 24, 0, 1, 0}, +{55, 0, 62, 24, 0, 64, 0}, +{55, 0, 62, 24, 0, 65, 0}, +{55, 0, 62, 24, 0, 4, 0}, +{56, 0, 21, 56, 0, 0, 0}, +{46, 0, 23, 16, 0, 0, 0}, +{51, 0, 23, 16, 0, 0, 0}, +{55, 0, 62, 16, 69, 0, 0}, +{55, 0, 62, 24, 69, 0, 0}, +{46, 0, 22, 16, 0, 0, 0}, +{54, 0, 63, 16, 0, 0, 0}, +{56, 0, 22, 56, 0, 0, 0}, +{51, 0, 22, 16, 0, 0, 0}, +{56, 0, 20, 56, 0, 0, 0}, +{51, 0, 20, 16, 0, 0, 0}, +{46, 1, 15, 8, 196, 0, 16}, +{45, 0, 15, 8, 0, 0, 0}, +{45, 0, 15, 8, 245, 0, 0}, +{51, 0, 15, 8, 33, 0, 212}, +{51, 0, 15, 8, 0, 0, 245}, +{46, 3, 17, 8, 196, 0, 16}, +{45, 0, 17, 8, 0, 0, 0}, +{45, 0, 17, 8, 245, 0, 0}, +{51, 0, 17, 8, 33, 0, 212}, +{51, 0, 17, 8, 0, 0, 245}, +{45, 0, 15, 8, 244, 0, 0}, +{45, 0, 17, 8, 244, 0, 0}, +{57, 0, 17, 9, 0, 0, 0}, +{58, 0, 37, 9, 0, 0, 0}, +{57, 0, 17, 12, 0, 0, 0}, +{58, 0, 
37, 12, 0, 0, 0}, +{57, 0, 17, 8, 0, 0, 0}, +{46, 0, 17, 8, 0, 0, 0}, +{46, 0, 16, 8, 0, 0, 0}, +{56, 0, 16, 8, 0, 0, 0}, +{46, 0, 16, 8, 64, 0, 0}, +{57, 0, 39, 8, 0, 0, 0}, +{52, 0, 28, 8, 0, 0, 0}, +{59, 0, 16, 8, 0, 0, 0}, +{56, 0, 42, 8, 0, 0, 0}, +{55, 0, 0, 112, 0, 0, 0}, +{55, 0, 0, 8, 0, 0, 0}, +{13, 0, 0, 24, 0, 0, 0}, +{56, 0, 58, 120, 0, 0, 0}, +{55, 0, 0, 120, 0, 0, 0}, +{55, 0, 58, 120, 0, 0, 0}, +{55, 60, 58, 120, 0, 0, 0}, +{60, 0, 0, 8, 0, 0, 0}, +{56, 0, 42, 96, 0, 0, 0}, +{61, 67, 64, 104, 0, 0, 0}, +{61, 67, 64, 96, 0, 0, 0}, +{35, 73, 68, 40, 0, 0, 0}, +{35, 73, 68, 48, 0, 0, 0}, +{35, 71, 68, 40, 0, 0, 0}, +{35, 72, 68, 48, 0, 0, 0}, +{62, 90, 83, 128, 0, 0, 0}, +{63, 81, 68, 128, 0, 0, 0}, +{64, 44, 68, 128, 0, 0, 0}, +{64, 46, 68, 128, 0, 0, 0}, +{35, 68, 73, 40, 0, 0, 0}, +{35, 68, 73, 48, 0, 0, 0}, +{35, 68, 71, 40, 0, 0, 0}, +{35, 68, 72, 48, 0, 0, 0}, +{62, 83, 90, 128, 0, 0, 0}, +{64, 68, 44, 128, 0, 0, 0}, +{64, 68, 46, 128, 0, 0, 0}, +{65, 72, 68, 40, 0, 0, 0}, +{35, 46, 68, 48, 0, 0, 0}, +{35, 72, 68, 56, 0, 0, 0}, +{66, 81, 68, 128, 0, 0, 0}, +{67, 81, 68, 128, 0, 0, 0}, +{62, 89, 83, 128, 0, 0, 0}, +{35, 68, 46, 40, 0, 0, 0}, +{35, 68, 46, 48, 0, 0, 0}, +{62, 68, 46, 128, 0, 0, 0}, +{34, 73, 68, 40, 0, 0, 0}, +{34, 73, 68, 48, 0, 0, 0}, +{67, 88, 83, 128, 0, 0, 0}, +{35, 73, 68, 56, 0, 0, 0}, +{56, 0, 42, 40, 0, 0, 0}, +{34, 67, 68, 40, 0, 0, 0}, +{34, 67, 68, 48, 0, 0, 0}, +{42, 18, 68, 40, 0, 0, 0}, +{42, 18, 68, 48, 0, 0, 0}, +{35, 68, 47, 40, 0, 0, 0}, +{35, 68, 47, 48, 0, 0, 0}, +{35, 68, 44, 88, 0, 0, 0}, +{35, 68, 46, 88, 0, 0, 0}, +{62, 83, 92, 128, 0, 0, 0}, +{34, 72, 64, 40, 0, 0, 0}, +{34, 73, 64, 48, 0, 0, 0}, +{42, 71, 13, 40, 0, 0, 0}, +{42, 72, 13, 48, 0, 0, 0}, +{62, 80, 78, 128, 0, 0, 0}, +{34, 71, 68, 40, 69, 0, 0}, +{34, 72, 68, 48, 0, 0, 0}, +{62, 71, 68, 128, 0, 0, 0}, +{62, 72, 68, 128, 0, 0, 0}, +{68, 69, 12, 40, 0, 0, 0}, +{68, 69, 12, 48, 0, 0, 0}, +{69, 83, 13, 128, 0, 0, 0}, +{34, 71, 68, 40, 0, 0, 0}, 
+{34, 71, 68, 48, 0, 0, 0}, +{62, 91, 83, 128, 0, 0, 0}, +{62, 90, 68, 128, 0, 0, 0}, +{34, 66, 64, 32, 0, 0, 0}, +{34, 67, 64, 32, 0, 0, 0}, +{70, 18, 64, 32, 0, 0, 0}, +{70, 18, 68, 48, 0, 0, 0}, +{62, 79, 68, 128, 0, 0, 0}, +{35, 67, 64, 32, 0, 0, 0}, +{71, 67, 64, 40, 0, 0, 0}, +{71, 73, 68, 48, 0, 0, 0}, +{67, 73, 68, 128, 0, 0, 0}, +{32, 0, 0, 32, 0, 0, 0}, +{72, 0, 0, 128, 0, 0, 0}, +{73, 13, 18, 112, 0, 0, 0}, +{74, 7, 69, 88, 0, 0, 0}, +{75, 69, 68, 88, 0, 0, 0}, +{73, 18, 13, 112, 0, 0, 0}, +{34, 69, 68, 88, 0, 0, 0}, +{76, 69, 68, 88, 0, 0, 0}, +{32, 72, 68, 112, 0, 0, 0}, +{32, 68, 72, 112, 0, 0, 0}, +{34, 73, 68, 56, 0, 0, 0}, +{70, 64, 18, 32, 0, 0, 0}, +{70, 68, 18, 48, 0, 0, 0}, +{62, 68, 79, 128, 0, 0, 0}, +{35, 64, 67, 32, 0, 0, 0}, +{77, 0, 42, 8, 0, 0, 0}, +{78, 0, 43, 8, 0, 0, 0}, +{79, 0, 43, 8, 0, 0, 0}, +{80, 17, 11, 80, 64, 0, 0}, +{81, 1, 17, 8, 1, 0, 244}, +{49, 1, 17, 8, 1, 0, 244}, +{34, 17, 11, 8, 64, 0, 245}, +{82, 17, 11, 112, 0, 0, 0}, +{83, 17, 11, 8, 65, 0, 180}, +{84, 73, 68, 40, 0, 0, 0}, +{84, 73, 68, 48, 0, 0, 0}, +{84, 71, 68, 40, 0, 0, 0}, +{84, 72, 68, 48, 0, 0, 0}, +{85, 88, 83, 128, 0, 0, 0}, +{85, 81, 68, 128, 0, 0, 0}, +{71, 25, 64, 40, 0, 0, 0}, +{71, 25, 68, 48, 0, 0, 0}, +{86, 81, 68, 128, 0, 0, 0}, +{87, 65, 12, 40, 0, 0, 0}, +{71, 69, 12, 48, 0, 0, 0}, +{88, 68, 13, 128, 0, 0, 0}, +{71, 73, 68, 40, 0, 0, 0}, +{86, 88, 83, 128, 0, 0, 0}, +{89, 0, 48, 8, 64, 0, 0}, +{56, 0, 46, 112, 0, 0, 0}, +{68, 65, 68, 48, 0, 0, 0}, +{68, 69, 64, 48, 0, 0, 0}, +{62, 68, 72, 128, 0, 0, 0}, +{76, 65, 12, 40, 0, 0, 0}, +{76, 69, 12, 48, 0, 0, 0}, +{69, 68, 13, 128, 0, 0, 0}, +{34, 67, 64, 40, 0, 0, 0}, +{35, 64, 46, 40, 0, 0, 0}, +{34, 42, 68, 56, 0, 0, 0}, +{62, 92, 83, 128, 0, 0, 0}, +{34, 67, 64, 48, 0, 0, 0}, +{76, 65, 64, 40, 0, 0, 0}, +{76, 69, 68, 48, 0, 0, 0}, +{90, 69, 68, 128, 0, 0, 0}, +{51, 0, 42, 16, 0, 0, 0}, +{91, 0, 42, 16, 0, 0, 0}, +{91, 0, 20, 16, 0, 0, 0}, +{92, 0, 0, 16, 0, 0, 0}, +{93, 0, 34, 16, 0, 0, 0}, 
+{94, 0, 34, 16, 0, 0, 0}, +{34, 67, 64, 64, 0, 0, 0}, +{34, 73, 68, 64, 0, 0, 0}, +{71, 73, 68, 72, 0, 0, 0}, +{34, 73, 68, 80, 0, 0, 0}, +{62, 44, 83, 128, 0, 0, 0}, +{62, 46, 85, 128, 0, 0, 0}, +{62, 47, 85, 128, 0, 0, 0}, +{62, 73, 68, 128, 0, 0, 0}, +{34, 72, 68, 72, 0, 0, 0}, +{34, 71, 68, 72, 0, 0, 0}, +{34, 70, 68, 72, 0, 0, 0}, +{62, 70, 68, 128, 0, 0, 0}, +{34, 73, 68, 72, 0, 0, 0}, +{35, 47, 68, 72, 0, 0, 0}, +{62, 47, 68, 128, 0, 0, 0}, +{67, 88, 92, 128, 0, 0, 0}, +{73, 47, 13, 112, 0, 0, 0}, +{67, 88, 83, 136, 0, 0, 0}, +{67, 81, 68, 136, 0, 0, 0}, +{34, 73, 68, 152, 0, 0, 0}, +{62, 73, 68, 152, 0, 0, 0}, +{67, 81, 68, 152, 0, 0, 0}, +{35, 17, 11, 8, 0, 0, 0}, +{35, 15, 13, 80, 0, 0, 0}, +{35, 11, 17, 8, 0, 0, 0}, +{35, 17, 13, 80, 0, 0, 0}, +{67, 90, 83, 128, 0, 0, 0}, +{86, 87, 85, 128, 0, 0, 0}, +{71, 71, 68, 72, 0, 0, 0}, +{71, 72, 68, 72, 0, 0, 0}, +{71, 67, 64, 64, 0, 0, 0}, +{71, 73, 68, 64, 0, 0, 0}, +{71, 68, 26, 72, 0, 0, 0}, +{88, 68, 76, 128, 0, 0, 0}, +{71, 68, 27, 72, 0, 0, 0}, +{88, 68, 77, 128, 0, 0, 0}, +{95, 68, 18, 72, 0, 0, 0}, +{67, 68, 79, 128, 0, 0, 0}, +{71, 68, 18, 72, 0, 0, 0}, +{67, 68, 75, 128, 0, 0, 0}, +{67, 85, 73, 128, 0, 0, 0}, +{71, 24, 68, 72, 0, 0, 0}, +{95, 18, 68, 72, 0, 0, 0}, +{71, 73, 68, 144, 0, 0, 0}, +{86, 81, 68, 144, 0, 0, 0}, +{71, 73, 68, 80, 0, 0, 0}, +{71, 73, 68, 152, 0, 0, 0}, +{67, 73, 68, 152, 0, 0, 0}, +{96, 1, 65, 32, 0, 0, 0}, +{56, 1, 69, 48, 0, 0, 0}, +{97, 69, 81, 128, 0, 0, 0}, +{98, 0, 13, 112, 0, 0, 0}, +{56, 0, 44, 8, 0, 0, 0}, +{64, 0, 44, 128, 0, 0, 0}, +{56, 0, 42, 112, 0, 0, 0}, +{99, 75, 13, 8, 0, 0, 0}, +{98, 0, 17, 8, 0, 0, 0}, +{100, 67, 64, 96, 0, 0, 0} +}; + +uint16_t CmpMnemonicOffsets[8] = { +0, 9, 18, 27, 39, 49, 59, 69 +}; +uint16_t VCmpMnemonicOffsets[32] = { +0, 10, 20, 30, 43, 54, 65, 76, 87, 100, 111, 122, 135, 149, 159, 169, 181, 194, 207, 220, 235, 249, 263, 277, 290, 303, 317, 331, 347, 361, 374, 387 +}; \ No newline at end of file diff --git 
a/module/src/main/cpp/whale/src/dbi/x86/distorm/insts.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/insts.h new file mode 100644 index 00000000..77db087b --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/insts.h @@ -0,0 +1,64 @@ +/* +insts.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#ifndef INSTS_H +#define INSTS_H + +#include "instructions.h" + + +/* Flags Table */ +extern _iflags FlagsTable[]; + +/* Root Trie DB */ +extern _InstSharedInfo InstSharedInfoTable[]; +extern _InstInfo InstInfos[]; +extern _InstInfoEx InstInfosEx[]; +extern _InstNode InstructionsTree[]; + +/* 3DNow! Trie DB */ +extern _InstNode Table_0F_0F; +/* AVX related: */ +extern _InstNode Table_0F, Table_0F_38, Table_0F_3A; + +/* + * The inst_lookup will return on of these two instructions according to the specified decoding mode. + * ARPL or MOVSXD on 64 bits is one byte instruction at index 0x63. + */ +extern _InstInfo II_MOVSXD; + +/* + * The NOP instruction can be prefixed by REX in 64bits, therefore we have to decide in runtime whether it's an XCHG or NOP instruction. + * If 0x90 is prefixed by a usable REX it will become XCHG, otherwise it will become a NOP. + * Also note that if it's prefixed by 0xf3, it becomes a Pause. + */ +extern _InstInfo II_NOP; +extern _InstInfo II_PAUSE; + +/* + * RDRAND and VMPTRLD share same 2.3 bytes opcode, and then alternates on the MOD bits, + * RDRAND is OT_FULL_REG while VMPTRLD is OT_MEM, and there's no such mixed type. + * So a hack into the inst_lookup was added for this decision, the DB isn't flexible enough. :( + */ +extern _InstInfo II_RDRAND; + +/* + * Used for letting the extract operand know the type of operands without knowing the + * instruction itself yet, because of the way those instructions work. + * See function instructions.c!inst_lookup_3dnow. 
+ */ +extern _InstInfo II_3DNOW; + +/* Helper tables for pseudo compare mnemonics. */ +extern uint16_t CmpMnemonicOffsets[8]; /* SSE */ +extern uint16_t VCmpMnemonicOffsets[32]; /* AVX */ + +#endif /* INSTS_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.c new file mode 100644 index 00000000..90212c3a --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.c @@ -0,0 +1,312 @@ +/* +mnemonics.c + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#include "mnemonics.h" + +#ifndef DISTORM_LIGHT + +const unsigned char _MNEMONICS[] = +"\x09" "UNDEFINED\0" "\x03" "ADD\0" "\x04" "PUSH\0" "\x03" "POP\0" \ +"\x02" "OR\0" "\x03" "ADC\0" "\x03" "SBB\0" "\x03" "AND\0" "\x03" "DAA\0" \ +"\x03" "SUB\0" "\x03" "DAS\0" "\x03" "XOR\0" "\x03" "AAA\0" "\x03" "CMP\0" \ +"\x03" "AAS\0" "\x03" "INC\0" "\x03" "DEC\0" "\x05" "PUSHA\0" "\x04" "POPA\0" \ +"\x05" "BOUND\0" "\x04" "ARPL\0" "\x04" "IMUL\0" "\x03" "INS\0" "\x04" "OUTS\0" \ +"\x02" "JO\0" "\x03" "JNO\0" "\x02" "JB\0" "\x03" "JAE\0" "\x02" "JZ\0" \ +"\x03" "JNZ\0" "\x03" "JBE\0" "\x02" "JA\0" "\x02" "JS\0" "\x03" "JNS\0" \ +"\x02" "JP\0" "\x03" "JNP\0" "\x02" "JL\0" "\x03" "JGE\0" "\x03" "JLE\0" \ +"\x02" "JG\0" "\x04" "TEST\0" "\x04" "XCHG\0" "\x03" "MOV\0" "\x03" "LEA\0" \ +"\x03" "CBW\0" "\x04" "CWDE\0" "\x04" "CDQE\0" "\x03" "CWD\0" "\x03" "CDQ\0" \ +"\x03" "CQO\0" "\x08" "CALL FAR\0" "\x05" "PUSHF\0" "\x04" "POPF\0" \ +"\x04" "SAHF\0" "\x04" "LAHF\0" "\x04" "MOVS\0" "\x04" "CMPS\0" "\x04" "STOS\0" \ +"\x04" "LODS\0" "\x04" "SCAS\0" "\x03" "RET\0" "\x03" "LES\0" "\x03" "LDS\0" \ +"\x05" "ENTER\0" "\x05" "LEAVE\0" "\x04" "RETF\0" "\x05" "INT 3\0" \ +"\x03" "INT\0" "\x04" "INTO\0" "\x04" "IRET\0" "\x03" "AAM\0" "\x03" "AAD\0" \ +"\x04" "SALC\0" "\x04" 
"XLAT\0" "\x06" "LOOPNZ\0" "\x05" "LOOPZ\0" \ +"\x04" "LOOP\0" "\x04" "JCXZ\0" "\x05" "JECXZ\0" "\x05" "JRCXZ\0" "\x02" "IN\0" \ +"\x03" "OUT\0" "\x04" "CALL\0" "\x03" "JMP\0" "\x07" "JMP FAR\0" "\x04" "INT1\0" \ +"\x03" "HLT\0" "\x03" "CMC\0" "\x03" "CLC\0" "\x03" "STC\0" "\x03" "CLI\0" \ +"\x03" "STI\0" "\x03" "CLD\0" "\x03" "STD\0" "\x03" "LAR\0" "\x03" "LSL\0" \ +"\x07" "SYSCALL\0" "\x04" "CLTS\0" "\x06" "SYSRET\0" "\x04" "INVD\0" \ +"\x06" "WBINVD\0" "\x03" "UD2\0" "\x05" "FEMMS\0" "\x03" "NOP\0" "\x05" "WRMSR\0" \ +"\x05" "RDTSC\0" "\x05" "RDMSR\0" "\x05" "RDPMC\0" "\x08" "SYSENTER\0" \ +"\x07" "SYSEXIT\0" "\x06" "GETSEC\0" "\x05" "CMOVO\0" "\x06" "CMOVNO\0" \ +"\x05" "CMOVB\0" "\x06" "CMOVAE\0" "\x05" "CMOVZ\0" "\x06" "CMOVNZ\0" \ +"\x06" "CMOVBE\0" "\x05" "CMOVA\0" "\x05" "CMOVS\0" "\x06" "CMOVNS\0" \ +"\x05" "CMOVP\0" "\x06" "CMOVNP\0" "\x05" "CMOVL\0" "\x06" "CMOVGE\0" \ +"\x06" "CMOVLE\0" "\x05" "CMOVG\0" "\x04" "SETO\0" "\x05" "SETNO\0" \ +"\x04" "SETB\0" "\x05" "SETAE\0" "\x04" "SETZ\0" "\x05" "SETNZ\0" "\x05" "SETBE\0" \ +"\x04" "SETA\0" "\x04" "SETS\0" "\x05" "SETNS\0" "\x04" "SETP\0" "\x05" "SETNP\0" \ +"\x04" "SETL\0" "\x05" "SETGE\0" "\x05" "SETLE\0" "\x04" "SETG\0" "\x05" "CPUID\0" \ +"\x02" "BT\0" "\x04" "SHLD\0" "\x03" "RSM\0" "\x03" "BTS\0" "\x04" "SHRD\0" \ +"\x07" "CMPXCHG\0" "\x03" "LSS\0" "\x03" "BTR\0" "\x03" "LFS\0" "\x03" "LGS\0" \ +"\x05" "MOVZX\0" "\x03" "BTC\0" "\x05" "MOVSX\0" "\x04" "XADD\0" "\x06" "MOVNTI\0" \ +"\x05" "BSWAP\0" "\x03" "ROL\0" "\x03" "ROR\0" "\x03" "RCL\0" "\x03" "RCR\0" \ +"\x03" "SHL\0" "\x03" "SHR\0" "\x03" "SAL\0" "\x03" "SAR\0" "\x06" "XABORT\0" \ +"\x06" "XBEGIN\0" "\x04" "FADD\0" "\x04" "FMUL\0" "\x04" "FCOM\0" "\x05" "FCOMP\0" \ +"\x04" "FSUB\0" "\x05" "FSUBR\0" "\x04" "FDIV\0" "\x05" "FDIVR\0" "\x03" "FLD\0" \ +"\x03" "FST\0" "\x04" "FSTP\0" "\x06" "FLDENV\0" "\x05" "FLDCW\0" "\x04" "FXCH\0" \ +"\x04" "FNOP\0" "\x04" "FCHS\0" "\x04" "FABS\0" "\x04" "FTST\0" "\x04" "FXAM\0" \ +"\x04" "FLD1\0" "\x06" 
"FLDL2T\0" "\x06" "FLDL2E\0" "\x05" "FLDPI\0" \ +"\x06" "FLDLG2\0" "\x06" "FLDLN2\0" "\x04" "FLDZ\0" "\x05" "F2XM1\0" \ +"\x05" "FYL2X\0" "\x05" "FPTAN\0" "\x06" "FPATAN\0" "\x07" "FXTRACT\0" \ +"\x06" "FPREM1\0" "\x07" "FDECSTP\0" "\x07" "FINCSTP\0" "\x05" "FPREM\0" \ +"\x07" "FYL2XP1\0" "\x05" "FSQRT\0" "\x07" "FSINCOS\0" "\x07" "FRNDINT\0" \ +"\x06" "FSCALE\0" "\x04" "FSIN\0" "\x04" "FCOS\0" "\x05" "FIADD\0" \ +"\x05" "FIMUL\0" "\x05" "FICOM\0" "\x06" "FICOMP\0" "\x05" "FISUB\0" \ +"\x06" "FISUBR\0" "\x05" "FIDIV\0" "\x06" "FIDIVR\0" "\x06" "FCMOVB\0" \ +"\x06" "FCMOVE\0" "\x07" "FCMOVBE\0" "\x06" "FCMOVU\0" "\x07" "FUCOMPP\0" \ +"\x04" "FILD\0" "\x06" "FISTTP\0" "\x04" "FIST\0" "\x05" "FISTP\0" \ +"\x07" "FCMOVNB\0" "\x07" "FCMOVNE\0" "\x08" "FCMOVNBE\0" "\x07" "FCMOVNU\0" \ +"\x04" "FENI\0" "\x06" "FEDISI\0" "\x06" "FSETPM\0" "\x06" "FUCOMI\0" \ +"\x05" "FCOMI\0" "\x06" "FRSTOR\0" "\x05" "FFREE\0" "\x05" "FUCOM\0" \ +"\x06" "FUCOMP\0" "\x05" "FADDP\0" "\x05" "FMULP\0" "\x06" "FCOMPP\0" \ +"\x06" "FSUBRP\0" "\x05" "FSUBP\0" "\x06" "FDIVRP\0" "\x05" "FDIVP\0" \ +"\x04" "FBLD\0" "\x05" "FBSTP\0" "\x07" "FUCOMIP\0" "\x06" "FCOMIP\0" \ +"\x03" "NOT\0" "\x03" "NEG\0" "\x03" "MUL\0" "\x03" "DIV\0" "\x04" "IDIV\0" \ +"\x04" "SLDT\0" "\x03" "STR\0" "\x04" "LLDT\0" "\x03" "LTR\0" "\x04" "VERR\0" \ +"\x04" "VERW\0" "\x04" "SGDT\0" "\x04" "SIDT\0" "\x04" "LGDT\0" "\x04" "LIDT\0" \ +"\x04" "SMSW\0" "\x04" "LMSW\0" "\x06" "INVLPG\0" "\x06" "VMCALL\0" \ +"\x08" "VMLAUNCH\0" "\x08" "VMRESUME\0" "\x06" "VMXOFF\0" "\x07" "MONITOR\0" \ +"\x05" "MWAIT\0" "\x06" "XGETBV\0" "\x06" "XSETBV\0" "\x06" "VMFUNC\0" \ +"\x04" "XEND\0" "\x05" "VMRUN\0" "\x07" "VMMCALL\0" "\x06" "VMLOAD\0" \ +"\x06" "VMSAVE\0" "\x04" "STGI\0" "\x04" "CLGI\0" "\x06" "SKINIT\0" \ +"\x07" "INVLPGA\0" "\x06" "SWAPGS\0" "\x06" "RDTSCP\0" "\x08" "PREFETCH\0" \ +"\x09" "PREFETCHW\0" "\x05" "PI2FW\0" "\x05" "PI2FD\0" "\x05" "PF2IW\0" \ +"\x05" "PF2ID\0" "\x06" "PFNACC\0" "\x07" "PFPNACC\0" "\x07" "PFCMPGE\0" \ 
+"\x05" "PFMIN\0" "\x05" "PFRCP\0" "\x07" "PFRSQRT\0" "\x05" "PFSUB\0" \ +"\x05" "PFADD\0" "\x07" "PFCMPGT\0" "\x05" "PFMAX\0" "\x08" "PFRCPIT1\0" \ +"\x08" "PFRSQIT1\0" "\x06" "PFSUBR\0" "\x05" "PFACC\0" "\x07" "PFCMPEQ\0" \ +"\x05" "PFMUL\0" "\x08" "PFRCPIT2\0" "\x07" "PMULHRW\0" "\x06" "PSWAPD\0" \ +"\x07" "PAVGUSB\0" "\x06" "MOVUPS\0" "\x06" "MOVUPD\0" "\x05" "MOVSS\0" \ +"\x05" "MOVSD\0" "\x07" "VMOVUPS\0" "\x07" "VMOVUPD\0" "\x06" "VMOVSS\0" \ +"\x06" "VMOVSD\0" "\x07" "MOVHLPS\0" "\x06" "MOVLPS\0" "\x06" "MOVLPD\0" \ +"\x08" "MOVSLDUP\0" "\x07" "MOVDDUP\0" "\x08" "VMOVHLPS\0" "\x07" "VMOVLPS\0" \ +"\x07" "VMOVLPD\0" "\x09" "VMOVSLDUP\0" "\x08" "VMOVDDUP\0" "\x08" "UNPCKLPS\0" \ +"\x08" "UNPCKLPD\0" "\x09" "VUNPCKLPS\0" "\x09" "VUNPCKLPD\0" "\x08" "UNPCKHPS\0" \ +"\x08" "UNPCKHPD\0" "\x09" "VUNPCKHPS\0" "\x09" "VUNPCKHPD\0" "\x07" "MOVLHPS\0" \ +"\x06" "MOVHPS\0" "\x06" "MOVHPD\0" "\x08" "MOVSHDUP\0" "\x08" "VMOVLHPS\0" \ +"\x07" "VMOVHPS\0" "\x07" "VMOVHPD\0" "\x09" "VMOVSHDUP\0" "\x0b" "PREFETCHNTA\0" \ +"\x0a" "PREFETCHT0\0" "\x0a" "PREFETCHT1\0" "\x0a" "PREFETCHT2\0" "\x06" "MOVAPS\0" \ +"\x06" "MOVAPD\0" "\x07" "VMOVAPS\0" "\x07" "VMOVAPD\0" "\x08" "CVTPI2PS\0" \ +"\x08" "CVTPI2PD\0" "\x08" "CVTSI2SS\0" "\x08" "CVTSI2SD\0" "\x09" "VCVTSI2SS\0" \ +"\x09" "VCVTSI2SD\0" "\x07" "MOVNTPS\0" "\x07" "MOVNTPD\0" "\x07" "MOVNTSS\0" \ +"\x07" "MOVNTSD\0" "\x08" "VMOVNTPS\0" "\x08" "VMOVNTPD\0" "\x09" "CVTTPS2PI\0" \ +"\x09" "CVTTPD2PI\0" "\x09" "CVTTSS2SI\0" "\x09" "CVTTSD2SI\0" "\x0a" "VCVTTSS2SI\0" \ +"\x0a" "VCVTTSD2SI\0" "\x08" "CVTPS2PI\0" "\x08" "CVTPD2PI\0" "\x08" "CVTSS2SI\0" \ +"\x08" "CVTSD2SI\0" "\x09" "VCVTSS2SI\0" "\x09" "VCVTSD2SI\0" "\x07" "UCOMISS\0" \ +"\x07" "UCOMISD\0" "\x08" "VUCOMISS\0" "\x08" "VUCOMISD\0" "\x06" "COMISS\0" \ +"\x06" "COMISD\0" "\x07" "VCOMISS\0" "\x07" "VCOMISD\0" "\x08" "MOVMSKPS\0" \ +"\x08" "MOVMSKPD\0" "\x09" "VMOVMSKPS\0" "\x09" "VMOVMSKPD\0" "\x06" "SQRTPS\0" \ +"\x06" "SQRTPD\0" "\x06" "SQRTSS\0" "\x06" "SQRTSD\0" 
"\x07" "VSQRTPS\0" \ +"\x07" "VSQRTPD\0" "\x07" "VSQRTSS\0" "\x07" "VSQRTSD\0" "\x07" "RSQRTPS\0" \ +"\x07" "RSQRTSS\0" "\x08" "VRSQRTPS\0" "\x08" "VRSQRTSS\0" "\x05" "RCPPS\0" \ +"\x05" "RCPSS\0" "\x06" "VRCPPS\0" "\x06" "VRCPSS\0" "\x05" "ANDPS\0" \ +"\x05" "ANDPD\0" "\x06" "VANDPS\0" "\x06" "VANDPD\0" "\x06" "ANDNPS\0" \ +"\x06" "ANDNPD\0" "\x07" "VANDNPS\0" "\x07" "VANDNPD\0" "\x04" "ORPS\0" \ +"\x04" "ORPD\0" "\x05" "VORPS\0" "\x05" "VORPD\0" "\x05" "XORPS\0" \ +"\x05" "XORPD\0" "\x06" "VXORPS\0" "\x06" "VXORPD\0" "\x05" "ADDPS\0" \ +"\x05" "ADDPD\0" "\x05" "ADDSS\0" "\x05" "ADDSD\0" "\x06" "VADDPS\0" \ +"\x06" "VADDPD\0" "\x06" "VADDSS\0" "\x06" "VADDSD\0" "\x05" "MULPS\0" \ +"\x05" "MULPD\0" "\x05" "MULSS\0" "\x05" "MULSD\0" "\x06" "VMULPS\0" \ +"\x06" "VMULPD\0" "\x06" "VMULSS\0" "\x06" "VMULSD\0" "\x08" "CVTPS2PD\0" \ +"\x08" "CVTPD2PS\0" "\x08" "CVTSS2SD\0" "\x08" "CVTSD2SS\0" "\x09" "VCVTPS2PD\0" \ +"\x09" "VCVTPD2PS\0" "\x09" "VCVTSS2SD\0" "\x09" "VCVTSD2SS\0" "\x08" "CVTDQ2PS\0" \ +"\x08" "CVTPS2DQ\0" "\x09" "CVTTPS2DQ\0" "\x09" "VCVTDQ2PS\0" "\x09" "VCVTPS2DQ\0" \ +"\x0a" "VCVTTPS2DQ\0" "\x05" "SUBPS\0" "\x05" "SUBPD\0" "\x05" "SUBSS\0" \ +"\x05" "SUBSD\0" "\x06" "VSUBPS\0" "\x06" "VSUBPD\0" "\x06" "VSUBSS\0" \ +"\x06" "VSUBSD\0" "\x05" "MINPS\0" "\x05" "MINPD\0" "\x05" "MINSS\0" \ +"\x05" "MINSD\0" "\x06" "VMINPS\0" "\x06" "VMINPD\0" "\x06" "VMINSS\0" \ +"\x06" "VMINSD\0" "\x05" "DIVPS\0" "\x05" "DIVPD\0" "\x05" "DIVSS\0" \ +"\x05" "DIVSD\0" "\x06" "VDIVPS\0" "\x06" "VDIVPD\0" "\x06" "VDIVSS\0" \ +"\x06" "VDIVSD\0" "\x05" "MAXPS\0" "\x05" "MAXPD\0" "\x05" "MAXSS\0" \ +"\x05" "MAXSD\0" "\x06" "VMAXPS\0" "\x06" "VMAXPD\0" "\x06" "VMAXSS\0" \ +"\x06" "VMAXSD\0" "\x09" "PUNPCKLBW\0" "\x0a" "VPUNPCKLBW\0" "\x09" "PUNPCKLWD\0" \ +"\x0a" "VPUNPCKLWD\0" "\x09" "PUNPCKLDQ\0" "\x0a" "VPUNPCKLDQ\0" "\x08" "PACKSSWB\0" \ +"\x09" "VPACKSSWB\0" "\x07" "PCMPGTB\0" "\x08" "VPCMPGTB\0" "\x07" "PCMPGTW\0" \ +"\x08" "VPCMPGTW\0" "\x07" "PCMPGTD\0" "\x08" "VPCMPGTD\0" 
"\x08" "PACKUSWB\0" \ +"\x09" "VPACKUSWB\0" "\x09" "PUNPCKHBW\0" "\x0a" "VPUNPCKHBW\0" "\x09" "PUNPCKHWD\0" \ +"\x0a" "VPUNPCKHWD\0" "\x09" "PUNPCKHDQ\0" "\x0a" "VPUNPCKHDQ\0" "\x08" "PACKSSDW\0" \ +"\x09" "VPACKSSDW\0" "\x0a" "PUNPCKLQDQ\0" "\x0b" "VPUNPCKLQDQ\0" "\x0a" "PUNPCKHQDQ\0" \ +"\x0b" "VPUNPCKHQDQ\0" "\x04" "MOVD\0" "\x04" "MOVQ\0" "\x05" "VMOVD\0" \ +"\x05" "VMOVQ\0" "\x06" "MOVDQA\0" "\x06" "MOVDQU\0" "\x07" "VMOVDQA\0" \ +"\x07" "VMOVDQU\0" "\x06" "PSHUFW\0" "\x06" "PSHUFD\0" "\x07" "PSHUFHW\0" \ +"\x07" "PSHUFLW\0" "\x07" "VPSHUFD\0" "\x08" "VPSHUFHW\0" "\x08" "VPSHUFLW\0" \ +"\x07" "PCMPEQB\0" "\x08" "VPCMPEQB\0" "\x07" "PCMPEQW\0" "\x08" "VPCMPEQW\0" \ +"\x07" "PCMPEQD\0" "\x08" "VPCMPEQD\0" "\x04" "EMMS\0" "\x0a" "VZEROUPPER\0" \ +"\x08" "VZEROALL\0" "\x06" "VMREAD\0" "\x05" "EXTRQ\0" "\x07" "INSERTQ\0" \ +"\x07" "VMWRITE\0" "\x08" "CVTPH2PS\0" "\x08" "CVTPS2PH\0" "\x06" "HADDPD\0" \ +"\x06" "HADDPS\0" "\x07" "VHADDPD\0" "\x07" "VHADDPS\0" "\x06" "HSUBPD\0" \ +"\x06" "HSUBPS\0" "\x07" "VHSUBPD\0" "\x07" "VHSUBPS\0" "\x05" "XSAVE\0" \ +"\x07" "XSAVE64\0" "\x06" "LFENCE\0" "\x06" "XRSTOR\0" "\x08" "XRSTOR64\0" \ +"\x06" "MFENCE\0" "\x08" "XSAVEOPT\0" "\x0a" "XSAVEOPT64\0" "\x06" "SFENCE\0" \ +"\x07" "CLFLUSH\0" "\x06" "POPCNT\0" "\x03" "BSF\0" "\x05" "TZCNT\0" \ +"\x03" "BSR\0" "\x05" "LZCNT\0" "\x07" "CMPEQPS\0" "\x07" "CMPLTPS\0" \ +"\x07" "CMPLEPS\0" "\x0a" "CMPUNORDPS\0" "\x08" "CMPNEQPS\0" "\x08" "CMPNLTPS\0" \ +"\x08" "CMPNLEPS\0" "\x08" "CMPORDPS\0" "\x07" "CMPEQPD\0" "\x07" "CMPLTPD\0" \ +"\x07" "CMPLEPD\0" "\x0a" "CMPUNORDPD\0" "\x08" "CMPNEQPD\0" "\x08" "CMPNLTPD\0" \ +"\x08" "CMPNLEPD\0" "\x08" "CMPORDPD\0" "\x07" "CMPEQSS\0" "\x07" "CMPLTSS\0" \ +"\x07" "CMPLESS\0" "\x0a" "CMPUNORDSS\0" "\x08" "CMPNEQSS\0" "\x08" "CMPNLTSS\0" \ +"\x08" "CMPNLESS\0" "\x08" "CMPORDSS\0" "\x07" "CMPEQSD\0" "\x07" "CMPLTSD\0" \ +"\x07" "CMPLESD\0" "\x0a" "CMPUNORDSD\0" "\x08" "CMPNEQSD\0" "\x08" "CMPNLTSD\0" \ +"\x08" "CMPNLESD\0" "\x08" "CMPORDSD\0" "\x08" 
"VCMPEQPS\0" "\x08" "VCMPLTPS\0" \ +"\x08" "VCMPLEPS\0" "\x0b" "VCMPUNORDPS\0" "\x09" "VCMPNEQPS\0" "\x09" "VCMPNLTPS\0" \ +"\x09" "VCMPNLEPS\0" "\x09" "VCMPORDPS\0" "\x0b" "VCMPEQ_UQPS\0" "\x09" "VCMPNGEPS\0" \ +"\x09" "VCMPNGTPS\0" "\x0b" "VCMPFALSEPS\0" "\x0c" "VCMPNEQ_OQPS\0" "\x08" "VCMPGEPS\0" \ +"\x08" "VCMPGTPS\0" "\x0a" "VCMPTRUEPS\0" "\x0b" "VCMPEQ_OSPS\0" "\x0b" "VCMPLT_OQPS\0" \ +"\x0b" "VCMPLE_OQPS\0" "\x0d" "VCMPUNORD_SPS\0" "\x0c" "VCMPNEQ_USPS\0" \ +"\x0c" "VCMPNLT_UQPS\0" "\x0c" "VCMPNLE_UQPS\0" "\x0b" "VCMPORD_SPS\0" \ +"\x0b" "VCMPEQ_USPS\0" "\x0c" "VCMPNGE_UQPS\0" "\x0c" "VCMPNGT_UQPS\0" \ +"\x0e" "VCMPFALSE_OSPS\0" "\x0c" "VCMPNEQ_OSPS\0" "\x0b" "VCMPGE_OQPS\0" \ +"\x0b" "VCMPGT_OQPS\0" "\x0d" "VCMPTRUE_USPS\0" "\x08" "VCMPEQPD\0" "\x08" "VCMPLTPD\0" \ +"\x08" "VCMPLEPD\0" "\x0b" "VCMPUNORDPD\0" "\x09" "VCMPNEQPD\0" "\x09" "VCMPNLTPD\0" \ +"\x09" "VCMPNLEPD\0" "\x09" "VCMPORDPD\0" "\x0b" "VCMPEQ_UQPD\0" "\x09" "VCMPNGEPD\0" \ +"\x09" "VCMPNGTPD\0" "\x0b" "VCMPFALSEPD\0" "\x0c" "VCMPNEQ_OQPD\0" "\x08" "VCMPGEPD\0" \ +"\x08" "VCMPGTPD\0" "\x0a" "VCMPTRUEPD\0" "\x0b" "VCMPEQ_OSPD\0" "\x0b" "VCMPLT_OQPD\0" \ +"\x0b" "VCMPLE_OQPD\0" "\x0d" "VCMPUNORD_SPD\0" "\x0c" "VCMPNEQ_USPD\0" \ +"\x0c" "VCMPNLT_UQPD\0" "\x0c" "VCMPNLE_UQPD\0" "\x0b" "VCMPORD_SPD\0" \ +"\x0b" "VCMPEQ_USPD\0" "\x0c" "VCMPNGE_UQPD\0" "\x0c" "VCMPNGT_UQPD\0" \ +"\x0e" "VCMPFALSE_OSPD\0" "\x0c" "VCMPNEQ_OSPD\0" "\x0b" "VCMPGE_OQPD\0" \ +"\x0b" "VCMPGT_OQPD\0" "\x0d" "VCMPTRUE_USPD\0" "\x08" "VCMPEQSS\0" "\x08" "VCMPLTSS\0" \ +"\x08" "VCMPLESS\0" "\x0b" "VCMPUNORDSS\0" "\x09" "VCMPNEQSS\0" "\x09" "VCMPNLTSS\0" \ +"\x09" "VCMPNLESS\0" "\x09" "VCMPORDSS\0" "\x0b" "VCMPEQ_UQSS\0" "\x09" "VCMPNGESS\0" \ +"\x09" "VCMPNGTSS\0" "\x0b" "VCMPFALSESS\0" "\x0c" "VCMPNEQ_OQSS\0" "\x08" "VCMPGESS\0" \ +"\x08" "VCMPGTSS\0" "\x0a" "VCMPTRUESS\0" "\x0b" "VCMPEQ_OSSS\0" "\x0b" "VCMPLT_OQSS\0" \ +"\x0b" "VCMPLE_OQSS\0" "\x0d" "VCMPUNORD_SSS\0" "\x0c" "VCMPNEQ_USSS\0" \ +"\x0c" "VCMPNLT_UQSS\0" "\x0c" 
"VCMPNLE_UQSS\0" "\x0b" "VCMPORD_SSS\0" \ +"\x0b" "VCMPEQ_USSS\0" "\x0c" "VCMPNGE_UQSS\0" "\x0c" "VCMPNGT_UQSS\0" \ +"\x0e" "VCMPFALSE_OSSS\0" "\x0c" "VCMPNEQ_OSSS\0" "\x0b" "VCMPGE_OQSS\0" \ +"\x0b" "VCMPGT_OQSS\0" "\x0d" "VCMPTRUE_USSS\0" "\x08" "VCMPEQSD\0" "\x08" "VCMPLTSD\0" \ +"\x08" "VCMPLESD\0" "\x0b" "VCMPUNORDSD\0" "\x09" "VCMPNEQSD\0" "\x09" "VCMPNLTSD\0" \ +"\x09" "VCMPNLESD\0" "\x09" "VCMPORDSD\0" "\x0b" "VCMPEQ_UQSD\0" "\x09" "VCMPNGESD\0" \ +"\x09" "VCMPNGTSD\0" "\x0b" "VCMPFALSESD\0" "\x0c" "VCMPNEQ_OQSD\0" "\x08" "VCMPGESD\0" \ +"\x08" "VCMPGTSD\0" "\x0a" "VCMPTRUESD\0" "\x0b" "VCMPEQ_OSSD\0" "\x0b" "VCMPLT_OQSD\0" \ +"\x0b" "VCMPLE_OQSD\0" "\x0d" "VCMPUNORD_SSD\0" "\x0c" "VCMPNEQ_USSD\0" \ +"\x0c" "VCMPNLT_UQSD\0" "\x0c" "VCMPNLE_UQSD\0" "\x0b" "VCMPORD_SSD\0" \ +"\x0b" "VCMPEQ_USSD\0" "\x0c" "VCMPNGE_UQSD\0" "\x0c" "VCMPNGT_UQSD\0" \ +"\x0e" "VCMPFALSE_OSSD\0" "\x0c" "VCMPNEQ_OSSD\0" "\x0b" "VCMPGE_OQSD\0" \ +"\x0b" "VCMPGT_OQSD\0" "\x0d" "VCMPTRUE_USSD\0" "\x06" "PINSRW\0" "\x07" "VPINSRW\0" \ +"\x06" "PEXTRW\0" "\x07" "VPEXTRW\0" "\x06" "SHUFPS\0" "\x06" "SHUFPD\0" \ +"\x07" "VSHUFPS\0" "\x07" "VSHUFPD\0" "\x09" "CMPXCHG8B\0" "\x0a" "CMPXCHG16B\0" \ +"\x07" "VMPTRST\0" "\x08" "ADDSUBPD\0" "\x08" "ADDSUBPS\0" "\x09" "VADDSUBPD\0" \ +"\x09" "VADDSUBPS\0" "\x05" "PSRLW\0" "\x06" "VPSRLW\0" "\x05" "PSRLD\0" \ +"\x06" "VPSRLD\0" "\x05" "PSRLQ\0" "\x06" "VPSRLQ\0" "\x05" "PADDQ\0" \ +"\x06" "VPADDQ\0" "\x06" "PMULLW\0" "\x07" "VPMULLW\0" "\x07" "MOVQ2DQ\0" \ +"\x07" "MOVDQ2Q\0" "\x08" "PMOVMSKB\0" "\x09" "VPMOVMSKB\0" "\x07" "PSUBUSB\0" \ +"\x08" "VPSUBUSB\0" "\x07" "PSUBUSW\0" "\x08" "VPSUBUSW\0" "\x06" "PMINUB\0" \ +"\x07" "VPMINUB\0" "\x04" "PAND\0" "\x05" "VPAND\0" "\x07" "PADDUSB\0" \ +"\x08" "VPADDUSW\0" "\x07" "PADDUSW\0" "\x06" "PMAXUB\0" "\x07" "VPMAXUB\0" \ +"\x05" "PANDN\0" "\x06" "VPANDN\0" "\x05" "PAVGB\0" "\x06" "VPAVGB\0" \ +"\x05" "PSRAW\0" "\x06" "VPSRAW\0" "\x05" "PSRAD\0" "\x06" "VPSRAD\0" \ +"\x05" "PAVGW\0" "\x06" "VPAVGW\0" 
"\x07" "PMULHUW\0" "\x08" "VPMULHUW\0" \ +"\x06" "PMULHW\0" "\x07" "VPMULHW\0" "\x09" "CVTTPD2DQ\0" "\x08" "CVTDQ2PD\0" \ +"\x08" "CVTPD2DQ\0" "\x0a" "VCVTTPD2DQ\0" "\x09" "VCVTDQ2PD\0" "\x09" "VCVTPD2DQ\0" \ +"\x06" "MOVNTQ\0" "\x07" "MOVNTDQ\0" "\x08" "VMOVNTDQ\0" "\x06" "PSUBSB\0" \ +"\x07" "VPSUBSB\0" "\x06" "PSUBSW\0" "\x07" "VPSUBSW\0" "\x06" "PMINSW\0" \ +"\x07" "VPMINSW\0" "\x03" "POR\0" "\x04" "VPOR\0" "\x06" "PADDSB\0" \ +"\x07" "VPADDSB\0" "\x06" "PADDSW\0" "\x07" "VPADDSW\0" "\x06" "PMAXSW\0" \ +"\x07" "VPMAXSW\0" "\x04" "PXOR\0" "\x05" "VPXOR\0" "\x05" "LDDQU\0" \ +"\x06" "VLDDQU\0" "\x05" "PSLLW\0" "\x06" "VPSLLW\0" "\x05" "PSLLD\0" \ +"\x06" "VPSLLD\0" "\x05" "PSLLQ\0" "\x06" "VPSLLQ\0" "\x07" "PMULUDQ\0" \ +"\x08" "VPMULUDQ\0" "\x07" "PMADDWD\0" "\x08" "VPMADDWD\0" "\x06" "PSADBW\0" \ +"\x07" "VPSADBW\0" "\x08" "MASKMOVQ\0" "\x0a" "MASKMOVDQU\0" "\x0b" "VMASKMOVDQU\0" \ +"\x05" "PSUBB\0" "\x06" "VPSUBB\0" "\x05" "PSUBW\0" "\x06" "VPSUBW\0" \ +"\x05" "PSUBD\0" "\x06" "VPSUBD\0" "\x05" "PSUBQ\0" "\x06" "VPSUBQ\0" \ +"\x05" "PADDB\0" "\x06" "VPADDB\0" "\x05" "PADDW\0" "\x06" "VPADDW\0" \ +"\x05" "PADDD\0" "\x06" "VPADDD\0" "\x07" "FNSTENV\0" "\x06" "FSTENV\0" \ +"\x06" "FNSTCW\0" "\x05" "FSTCW\0" "\x06" "FNCLEX\0" "\x05" "FCLEX\0" \ +"\x06" "FNINIT\0" "\x05" "FINIT\0" "\x06" "FNSAVE\0" "\x05" "FSAVE\0" \ +"\x06" "FNSTSW\0" "\x05" "FSTSW\0" "\x06" "PSHUFB\0" "\x07" "VPSHUFB\0" \ +"\x06" "PHADDW\0" "\x07" "VPHADDW\0" "\x06" "PHADDD\0" "\x07" "VPHADDD\0" \ +"\x07" "PHADDSW\0" "\x08" "VPHADDSW\0" "\x09" "PMADDUBSW\0" "\x0a" "VPMADDUBSW\0" \ +"\x06" "PHSUBW\0" "\x07" "VPHSUBW\0" "\x06" "PHSUBD\0" "\x07" "VPHSUBD\0" \ +"\x07" "PHSUBSW\0" "\x08" "VPHSUBSW\0" "\x06" "PSIGNB\0" "\x07" "VPSIGNB\0" \ +"\x06" "PSIGNW\0" "\x07" "VPSIGNW\0" "\x06" "PSIGND\0" "\x07" "VPSIGND\0" \ +"\x08" "PMULHRSW\0" "\x09" "VPMULHRSW\0" "\x09" "VPERMILPS\0" "\x09" "VPERMILPD\0" \ +"\x07" "VTESTPS\0" "\x07" "VTESTPD\0" "\x08" "PBLENDVB\0" "\x08" "BLENDVPS\0" \ +"\x08" "BLENDVPD\0" 
"\x05" "PTEST\0" "\x06" "VPTEST\0" "\x0c" "VBROADCASTSS\0" \ +"\x0c" "VBROADCASTSD\0" "\x0e" "VBROADCASTF128\0" "\x05" "PABSB\0" "\x06" "VPABSB\0" \ +"\x05" "PABSW\0" "\x06" "VPABSW\0" "\x05" "PABSD\0" "\x06" "VPABSD\0" \ +"\x08" "PMOVSXBW\0" "\x09" "VPMOVSXBW\0" "\x08" "PMOVSXBD\0" "\x09" "VPMOVSXBD\0" \ +"\x08" "PMOVSXBQ\0" "\x09" "VPMOVSXBQ\0" "\x08" "PMOVSXWD\0" "\x09" "VPMOVSXWD\0" \ +"\x08" "PMOVSXWQ\0" "\x09" "VPMOVSXWQ\0" "\x08" "PMOVSXDQ\0" "\x09" "VPMOVSXDQ\0" \ +"\x06" "PMULDQ\0" "\x07" "VPMULDQ\0" "\x07" "PCMPEQQ\0" "\x08" "VPCMPEQQ\0" \ +"\x08" "MOVNTDQA\0" "\x09" "VMOVNTDQA\0" "\x08" "PACKUSDW\0" "\x09" "VPACKUSDW\0" \ +"\x0a" "VMASKMOVPS\0" "\x0a" "VMASKMOVPD\0" "\x08" "PMOVZXBW\0" "\x09" "VPMOVZXBW\0" \ +"\x08" "PMOVZXBD\0" "\x09" "VPMOVZXBD\0" "\x08" "PMOVZXBQ\0" "\x09" "VPMOVZXBQ\0" \ +"\x08" "PMOVZXWD\0" "\x09" "VPMOVZXWD\0" "\x08" "PMOVZXWQ\0" "\x09" "VPMOVZXWQ\0" \ +"\x08" "PMOVZXDQ\0" "\x09" "VPMOVZXDQ\0" "\x07" "PCMPGTQ\0" "\x08" "VPCMPGTQ\0" \ +"\x06" "PMINSB\0" "\x07" "VPMINSB\0" "\x06" "PMINSD\0" "\x07" "VPMINSD\0" \ +"\x06" "PMINUW\0" "\x07" "VPMINUW\0" "\x06" "PMINUD\0" "\x07" "VPMINUD\0" \ +"\x06" "PMAXSB\0" "\x07" "VPMAXSB\0" "\x06" "PMAXSD\0" "\x07" "VPMAXSD\0" \ +"\x06" "PMAXUW\0" "\x07" "VPMAXUW\0" "\x06" "PMAXUD\0" "\x07" "VPMAXUD\0" \ +"\x06" "PMULLD\0" "\x07" "VPMULLD\0" "\x0a" "PHMINPOSUW\0" "\x0b" "VPHMINPOSUW\0" \ +"\x06" "INVEPT\0" "\x07" "INVVPID\0" "\x07" "INVPCID\0" "\x0e" "VFMADDSUB132PS\0" \ +"\x0e" "VFMADDSUB132PD\0" "\x0e" "VFMSUBADD132PS\0" "\x0e" "VFMSUBADD132PD\0" \ +"\x0b" "VFMADD132PS\0" "\x0b" "VFMADD132PD\0" "\x0b" "VFMADD132SS\0" \ +"\x0b" "VFMADD132SD\0" "\x0b" "VFMSUB132PS\0" "\x0b" "VFMSUB132PD\0" \ +"\x0b" "VFMSUB132SS\0" "\x0b" "VFMSUB132SD\0" "\x0c" "VFNMADD132PS\0" \ +"\x0c" "VFNMADD132PD\0" "\x0c" "VFNMADD132SS\0" "\x0c" "VFNMADD132SD\0" \ +"\x0c" "VFNMSUB132PS\0" "\x0c" "VFNMSUB132PD\0" "\x0c" "VFNMSUB132SS\0" \ +"\x0c" "VFNMSUB132SD\0" "\x0e" "VFMADDSUB213PS\0" "\x0e" "VFMADDSUB213PD\0" \ +"\x0e" 
"VFMSUBADD213PS\0" "\x0e" "VFMSUBADD213PD\0" "\x0b" "VFMADD213PS\0" \ +"\x0b" "VFMADD213PD\0" "\x0b" "VFMADD213SS\0" "\x0b" "VFMADD213SD\0" \ +"\x0b" "VFMSUB213PS\0" "\x0b" "VFMSUB213PD\0" "\x0b" "VFMSUB213SS\0" \ +"\x0b" "VFMSUB213SD\0" "\x0c" "VFNMADD213PS\0" "\x0c" "VFNMADD213PD\0" \ +"\x0c" "VFNMADD213SS\0" "\x0c" "VFNMADD213SD\0" "\x0c" "VFNMSUB213PS\0" \ +"\x0c" "VFNMSUB213PD\0" "\x0c" "VFNMSUB213SS\0" "\x0c" "VFNMSUB213SD\0" \ +"\x0e" "VFMADDSUB231PS\0" "\x0e" "VFMADDSUB231PD\0" "\x0e" "VFMSUBADD231PS\0" \ +"\x0e" "VFMSUBADD231PD\0" "\x0b" "VFMADD231PS\0" "\x0b" "VFMADD231PD\0" \ +"\x0b" "VFMADD231SS\0" "\x0b" "VFMADD231SD\0" "\x0b" "VFMSUB231PS\0" \ +"\x0b" "VFMSUB231PD\0" "\x0b" "VFMSUB231SS\0" "\x0b" "VFMSUB231SD\0" \ +"\x0c" "VFNMADD231PS\0" "\x0c" "VFNMADD231PD\0" "\x0c" "VFNMADD231SS\0" \ +"\x0c" "VFNMADD231SD\0" "\x0c" "VFNMSUB231PS\0" "\x0c" "VFNMSUB231PD\0" \ +"\x0c" "VFNMSUB231SS\0" "\x0c" "VFNMSUB231SD\0" "\x06" "AESIMC\0" "\x07" "VAESIMC\0" \ +"\x06" "AESENC\0" "\x07" "VAESENC\0" "\x0a" "AESENCLAST\0" "\x0b" "VAESENCLAST\0" \ +"\x06" "AESDEC\0" "\x07" "VAESDEC\0" "\x0a" "AESDECLAST\0" "\x0b" "VAESDECLAST\0" \ +"\x05" "MOVBE\0" "\x05" "CRC32\0" "\x0a" "VPERM2F128\0" "\x07" "ROUNDPS\0" \ +"\x08" "VROUNDPS\0" "\x07" "ROUNDPD\0" "\x08" "VROUNDPD\0" "\x07" "ROUNDSS\0" \ +"\x08" "VROUNDSS\0" "\x07" "ROUNDSD\0" "\x08" "VROUNDSD\0" "\x07" "BLENDPS\0" \ +"\x08" "VBLENDPS\0" "\x07" "BLENDPD\0" "\x08" "VBLENDPD\0" "\x07" "PBLENDW\0" \ +"\x08" "VPBLENDW\0" "\x07" "PALIGNR\0" "\x08" "VPALIGNR\0" "\x06" "PEXTRB\0" \ +"\x07" "VPEXTRB\0" "\x06" "PEXTRD\0" "\x06" "PEXTRQ\0" "\x07" "VPEXTRD\0" \ +"\x07" "VPEXTRQ\0" "\x09" "EXTRACTPS\0" "\x0a" "VEXTRACTPS\0" "\x0b" "VINSERTF128\0" \ +"\x0c" "VEXTRACTF128\0" "\x06" "PINSRB\0" "\x07" "VPINSRB\0" "\x08" "INSERTPS\0" \ +"\x09" "VINSERTPS\0" "\x06" "PINSRD\0" "\x06" "PINSRQ\0" "\x07" "VPINSRD\0" \ +"\x07" "VPINSRQ\0" "\x04" "DPPS\0" "\x05" "VDPPS\0" "\x04" "DPPD\0" \ +"\x05" "VDPPD\0" "\x07" "MPSADBW\0" "\x08" 
"VMPSADBW\0" "\x09" "PCLMULQDQ\0" \ +"\x0a" "VPCLMULQDQ\0" "\x09" "VBLENDVPS\0" "\x09" "VBLENDVPD\0" "\x09" "VPBLENDVB\0" \ +"\x09" "PCMPESTRM\0" "\x0a" "VPCMPESTRM\0" "\x09" "PCMPESTRI\0" "\x0a" "VPCMPESTRI\0" \ +"\x09" "PCMPISTRM\0" "\x0a" "VPCMPISTRM\0" "\x09" "PCMPISTRI\0" "\x0a" "VPCMPISTRI\0" \ +"\x0f" "AESKEYGENASSIST\0" "\x10" "VAESKEYGENASSIST\0" "\x06" "PSRLDQ\0" \ +"\x07" "VPSRLDQ\0" "\x06" "PSLLDQ\0" "\x07" "VPSLLDQ\0" "\x06" "FXSAVE\0" \ +"\x08" "FXSAVE64\0" "\x08" "RDFSBASE\0" "\x07" "FXRSTOR\0" "\x09" "FXRSTOR64\0" \ +"\x08" "RDGSBASE\0" "\x07" "LDMXCSR\0" "\x08" "WRFSBASE\0" "\x08" "VLDMXCSR\0" \ +"\x07" "STMXCSR\0" "\x08" "WRGSBASE\0" "\x08" "VSTMXCSR\0" "\x07" "VMPTRLD\0" \ +"\x07" "VMCLEAR\0" "\x05" "VMXON\0" "\x06" "MOVSXD\0" "\x05" "PAUSE\0" \ +"\x04" "WAIT\0" "\x06" "RDRAND\0" "\x06" "_3DNOW\0"; + +const _WRegister _REGISTERS[] = { + { 3, "RAX" }, { 3, "RCX" }, { 3, "RDX" }, { 3, "RBX" }, { 3, "RSP" }, { 3, "RBP" }, { 3, "RSI" }, { 3, "RDI" }, { 2, "R8" }, { 2, "R9" }, { 3, "R10" }, { 3, "R11" }, { 3, "R12" }, { 3, "R13" }, { 3, "R14" }, { 3, "R15" }, + { 3, "EAX" }, { 3, "ECX" }, { 3, "EDX" }, { 3, "EBX" }, { 3, "ESP" }, { 3, "EBP" }, { 3, "ESI" }, { 3, "EDI" }, { 3, "R8D" }, { 3, "R9D" }, { 4, "R10D" }, { 4, "R11D" }, { 4, "R12D" }, { 4, "R13D" }, { 4, "R14D" }, { 4, "R15D" }, + { 2, "AX" }, { 2, "CX" }, { 2, "DX" }, { 2, "BX" }, { 2, "SP" }, { 2, "BP" }, { 2, "SI" }, { 2, "DI" }, { 3, "R8W" }, { 3, "R9W" }, { 4, "R10W" }, { 4, "R11W" }, { 4, "R12W" }, { 4, "R13W" }, { 4, "R14W" }, { 4, "R15W" }, + { 2, "AL" }, { 2, "CL" }, { 2, "DL" }, { 2, "BL" }, { 2, "AH" }, { 2, "CH" }, { 2, "DH" }, { 2, "BH" }, { 3, "R8B" }, { 3, "R9B" }, { 4, "R10B" }, { 4, "R11B" }, { 4, "R12B" }, { 4, "R13B" }, { 4, "R14B" }, { 4, "R15B" }, + { 3, "SPL" }, { 3, "BPL" }, { 3, "SIL" }, { 3, "DIL" }, + { 2, "ES" }, { 2, "CS" }, { 2, "SS" }, { 2, "DS" }, { 2, "FS" }, { 2, "GS" }, + { 3, "RIP" }, + { 3, "ST0" }, { 3, "ST1" }, { 3, "ST2" }, { 3, "ST3" }, { 3, "ST4" }, { 
3, "ST5" }, { 3, "ST6" }, { 3, "ST7" }, + { 3, "MM0" }, { 3, "MM1" }, { 3, "MM2" }, { 3, "MM3" }, { 3, "MM4" }, { 3, "MM5" }, { 3, "MM6" }, { 3, "MM7" }, + { 4, "XMM0" }, { 4, "XMM1" }, { 4, "XMM2" }, { 4, "XMM3" }, { 4, "XMM4" }, { 4, "XMM5" }, { 4, "XMM6" }, { 4, "XMM7" }, { 4, "XMM8" }, { 4, "XMM9" }, { 5, "XMM10" }, { 5, "XMM11" }, { 5, "XMM12" }, { 5, "XMM13" }, { 5, "XMM14" }, { 5, "XMM15" }, + { 4, "YMM0" }, { 4, "YMM1" }, { 4, "YMM2" }, { 4, "YMM3" }, { 4, "YMM4" }, { 4, "YMM5" }, { 4, "YMM6" }, { 4, "YMM7" }, { 4, "YMM8" }, { 4, "YMM9" }, { 5, "YMM10" }, { 5, "YMM11" }, { 5, "YMM12" }, { 5, "YMM13" }, { 5, "YMM14" }, { 5, "YMM15" }, + { 3, "CR0" }, { 0, "" }, { 3, "CR2" }, { 3, "CR3" }, { 3, "CR4" }, { 0, "" }, { 0, "" }, { 0, "" }, { 3, "CR8" }, + { 3, "DR0" }, { 3, "DR1" }, { 3, "DR2" }, { 3, "DR3" }, { 0, "" }, { 0, "" }, { 3, "DR6" }, { 3, "DR7" } +}; + +#endif /* DISTORM_LIGHT */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.h new file mode 100644 index 00000000..32d71be8 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/mnemonics.h @@ -0,0 +1,301 @@ +/* +mnemonics.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#ifndef MNEMONICS_H +#define MNEMONICS_H + +#ifdef __cplusplus + extern "C" { +#endif + +#ifndef DISTORM_LIGHT + +typedef struct WMnemonic { + unsigned char length; + unsigned char p[1]; /* p is a null terminated string, which contains 'length' characters. */ +} _WMnemonic; + +typedef struct WRegister { + unsigned int length; + unsigned char p[6]; /* p is a null terminated string. 
*/ +} _WRegister; + +extern const unsigned char _MNEMONICS[]; +extern const _WRegister _REGISTERS[]; + +#endif /* DISTORM_LIGHT */ + +#ifdef __cplusplus +} /* End Of Extern */ +#endif + +#define GET_REGISTER_NAME(r) (unsigned char*)_REGISTERS[(r)].p +#define GET_MNEMONIC_NAME(m) ((_WMnemonic*)&_MNEMONICS[(m)])->p + + typedef enum { + I_UNDEFINED = 0, I_AAA = 66, I_AAD = 389, I_AAM = 384, I_AAS = 76, I_ADC = 31, I_ADD = 11, I_ADDPD = 3132, + I_ADDPS = 3125, I_ADDSD = 3146, I_ADDSS = 3139, I_ADDSUBPD = 6416, I_ADDSUBPS = 6426, + I_AESDEC = 9231, I_AESDECLAST = 9248, I_AESENC = 9189, I_AESENCLAST = 9206, + I_AESIMC = 9172, I_AESKEYGENASSIST = 9817, I_AND = 41, I_ANDNPD = 3043, I_ANDNPS = 3035, + I_ANDPD = 3012, I_ANDPS = 3005, I_ARPL = 111, I_BLENDPD = 9394, I_BLENDPS = 9375, + I_BLENDVPD = 7641, I_BLENDVPS = 7631, I_BOUND = 104, I_BSF = 4368, I_BSR = 4380, + I_BSWAP = 960, I_BT = 872, I_BTC = 934, I_BTR = 912, I_BTS = 887, I_CALL = 456, + I_CALL_FAR = 260, I_CBW = 228, I_CDQ = 250, I_CDQE = 239, I_CLC = 492, I_CLD = 512, + I_CLFLUSH = 4351, I_CLGI = 1855, I_CLI = 502, I_CLTS = 541, I_CMC = 487, I_CMOVA = 694, + I_CMOVAE = 663, I_CMOVB = 656, I_CMOVBE = 686, I_CMOVG = 754, I_CMOVGE = 738, + I_CMOVL = 731, I_CMOVLE = 746, I_CMOVNO = 648, I_CMOVNP = 723, I_CMOVNS = 708, + I_CMOVNZ = 678, I_CMOVO = 641, I_CMOVP = 716, I_CMOVS = 701, I_CMOVZ = 671, + I_CMP = 71, I_CMPEQPD = 4471, I_CMPEQPS = 4392, I_CMPEQSD = 4629, I_CMPEQSS = 4550, + I_CMPLEPD = 4489, I_CMPLEPS = 4410, I_CMPLESD = 4647, I_CMPLESS = 4568, I_CMPLTPD = 4480, + I_CMPLTPS = 4401, I_CMPLTSD = 4638, I_CMPLTSS = 4559, I_CMPNEQPD = 4510, I_CMPNEQPS = 4431, + I_CMPNEQSD = 4668, I_CMPNEQSS = 4589, I_CMPNLEPD = 4530, I_CMPNLEPS = 4451, + I_CMPNLESD = 4688, I_CMPNLESS = 4609, I_CMPNLTPD = 4520, I_CMPNLTPS = 4441, + I_CMPNLTSD = 4678, I_CMPNLTSS = 4599, I_CMPORDPD = 4540, I_CMPORDPS = 4461, + I_CMPORDSD = 4698, I_CMPORDSS = 4619, I_CMPS = 301, I_CMPUNORDPD = 4498, I_CMPUNORDPS = 4419, + I_CMPUNORDSD = 4656, 
I_CMPUNORDSS = 4577, I_CMPXCHG = 898, I_CMPXCHG16B = 6395, + I_CMPXCHG8B = 6384, I_COMISD = 2801, I_COMISS = 2793, I_CPUID = 865, I_CQO = 255, + I_CRC32 = 9280, I_CVTDQ2PD = 6809, I_CVTDQ2PS = 3329, I_CVTPD2DQ = 6819, I_CVTPD2PI = 2703, + I_CVTPD2PS = 3255, I_CVTPH2PS = 4183, I_CVTPI2PD = 2517, I_CVTPI2PS = 2507, + I_CVTPS2DQ = 3339, I_CVTPS2PD = 3245, I_CVTPS2PH = 4193, I_CVTPS2PI = 2693, + I_CVTSD2SI = 2723, I_CVTSD2SS = 3275, I_CVTSI2SD = 2537, I_CVTSI2SS = 2527, + I_CVTSS2SD = 3265, I_CVTSS2SI = 2713, I_CVTTPD2DQ = 6798, I_CVTTPD2PI = 2636, + I_CVTTPS2DQ = 3349, I_CVTTPS2PI = 2625, I_CVTTSD2SI = 2658, I_CVTTSS2SI = 2647, + I_CWD = 245, I_CWDE = 233, I_DAA = 46, I_DAS = 56, I_DEC = 86, I_DIV = 1646, + I_DIVPD = 3521, I_DIVPS = 3514, I_DIVSD = 3535, I_DIVSS = 3528, I_DPPD = 9637, + I_DPPS = 9624, I_EMMS = 4122, I_ENTER = 340, I_EXTRACTPS = 9502, I_EXTRQ = 4158, + I_F2XM1 = 1192, I_FABS = 1123, I_FADD = 1023, I_FADDP = 1549, I_FBLD = 1601, + I_FBSTP = 1607, I_FCHS = 1117, I_FCLEX = 7311, I_FCMOVB = 1376, I_FCMOVBE = 1392, + I_FCMOVE = 1384, I_FCMOVNB = 1445, I_FCMOVNBE = 1463, I_FCMOVNE = 1454, I_FCMOVNU = 1473, + I_FCMOVU = 1401, I_FCOM = 1035, I_FCOMI = 1512, I_FCOMIP = 1623, I_FCOMP = 1041, + I_FCOMPP = 1563, I_FCOS = 1311, I_FDECSTP = 1238, I_FDIV = 1061, I_FDIVP = 1594, + I_FDIVR = 1067, I_FDIVRP = 1586, I_FEDISI = 1488, I_FEMMS = 574, I_FENI = 1482, + I_FFREE = 1527, I_FIADD = 1317, I_FICOM = 1331, I_FICOMP = 1338, I_FIDIV = 1361, + I_FIDIVR = 1368, I_FILD = 1418, I_FIMUL = 1324, I_FINCSTP = 1247, I_FINIT = 7326, + I_FIST = 1432, I_FISTP = 1438, I_FISTTP = 1424, I_FISUB = 1346, I_FISUBR = 1353, + I_FLD = 1074, I_FLD1 = 1141, I_FLDCW = 1098, I_FLDENV = 1090, I_FLDL2E = 1155, + I_FLDL2T = 1147, I_FLDLG2 = 1170, I_FLDLN2 = 1178, I_FLDPI = 1163, I_FLDZ = 1186, + I_FMUL = 1029, I_FMULP = 1556, I_FNCLEX = 7303, I_FNINIT = 7318, I_FNOP = 1111, + I_FNSAVE = 7333, I_FNSTCW = 7288, I_FNSTENV = 7271, I_FNSTSW = 7348, I_FPATAN = 1213, + I_FPREM = 1256, I_FPREM1 = 1230, 
I_FPTAN = 1206, I_FRNDINT = 1288, I_FRSTOR = 1519, + I_FSAVE = 7341, I_FSCALE = 1297, I_FSETPM = 1496, I_FSIN = 1305, I_FSINCOS = 1279, + I_FSQRT = 1272, I_FST = 1079, I_FSTCW = 7296, I_FSTENV = 7280, I_FSTP = 1084, + I_FSTSW = 7356, I_FSUB = 1048, I_FSUBP = 1579, I_FSUBR = 1054, I_FSUBRP = 1571, + I_FTST = 1129, I_FUCOM = 1534, I_FUCOMI = 1504, I_FUCOMIP = 1614, I_FUCOMP = 1541, + I_FUCOMPP = 1409, I_FXAM = 1135, I_FXCH = 1105, I_FXRSTOR = 9914, I_FXRSTOR64 = 9923, + I_FXSAVE = 9886, I_FXSAVE64 = 9894, I_FXTRACT = 1221, I_FYL2X = 1199, I_FYL2XP1 = 1263, + I_GETSEC = 633, I_HADDPD = 4203, I_HADDPS = 4211, I_HLT = 482, I_HSUBPD = 4237, + I_HSUBPS = 4245, I_IDIV = 1651, I_IMUL = 117, I_IN = 447, I_INC = 81, I_INS = 123, + I_INSERTPS = 9569, I_INSERTQ = 4165, I_INT = 367, I_INT_3 = 360, I_INT1 = 476, + I_INTO = 372, I_INVD = 555, I_INVEPT = 8306, I_INVLPG = 1727, I_INVLPGA = 1869, + I_INVPCID = 8323, I_INVVPID = 8314, I_IRET = 378, I_JA = 166, I_JAE = 147, + I_JB = 143, I_JBE = 161, I_JCXZ = 427, I_JECXZ = 433, I_JG = 202, I_JGE = 192, + I_JL = 188, I_JLE = 197, I_JMP = 462, I_JMP_FAR = 467, I_JNO = 138, I_JNP = 183, + I_JNS = 174, I_JNZ = 156, I_JO = 134, I_JP = 179, I_JRCXZ = 440, I_JS = 170, + I_JZ = 152, I_LAHF = 289, I_LAR = 522, I_LDDQU = 7016, I_LDMXCSR = 9944, I_LDS = 335, + I_LEA = 223, I_LEAVE = 347, I_LES = 330, I_LFENCE = 4287, I_LFS = 917, I_LGDT = 1703, + I_LGS = 922, I_LIDT = 1709, I_LLDT = 1668, I_LMSW = 1721, I_LODS = 313, I_LOOP = 421, + I_LOOPNZ = 406, I_LOOPZ = 414, I_LSL = 527, I_LSS = 907, I_LTR = 1674, I_LZCNT = 4385, + I_MASKMOVDQU = 7141, I_MASKMOVQ = 7131, I_MAXPD = 3581, I_MAXPS = 3574, I_MAXSD = 3595, + I_MAXSS = 3588, I_MFENCE = 4313, I_MINPD = 3461, I_MINPS = 3454, I_MINSD = 3475, + I_MINSS = 3468, I_MONITOR = 1771, I_MOV = 218, I_MOVAPD = 2481, I_MOVAPS = 2473, + I_MOVBE = 9273, I_MOVD = 3942, I_MOVDDUP = 2208, I_MOVDQ2Q = 6544, I_MOVDQA = 3968, + I_MOVDQU = 3976, I_MOVHLPS = 2173, I_MOVHPD = 2367, I_MOVHPS = 2359, I_MOVLHPS = 2350, + 
I_MOVLPD = 2190, I_MOVLPS = 2182, I_MOVMSKPD = 2837, I_MOVMSKPS = 2827, I_MOVNTDQ = 6871, + I_MOVNTDQA = 7917, I_MOVNTI = 952, I_MOVNTPD = 2578, I_MOVNTPS = 2569, I_MOVNTQ = 6863, + I_MOVNTSD = 2596, I_MOVNTSS = 2587, I_MOVQ = 3948, I_MOVQ2DQ = 6535, I_MOVS = 295, + I_MOVSD = 2132, I_MOVSHDUP = 2375, I_MOVSLDUP = 2198, I_MOVSS = 2125, I_MOVSX = 939, + I_MOVSXD = 10027, I_MOVUPD = 2117, I_MOVUPS = 2109, I_MOVZX = 927, I_MPSADBW = 9650, + I_MUL = 1641, I_MULPD = 3192, I_MULPS = 3185, I_MULSD = 3206, I_MULSS = 3199, + I_MWAIT = 1780, I_NEG = 1636, I_NOP = 581, I_NOT = 1631, I_OR = 27, I_ORPD = 3075, + I_ORPS = 3069, I_OUT = 451, I_OUTS = 128, I_PABSB = 7710, I_PABSD = 7740, I_PABSW = 7725, + I_PACKSSDW = 3871, I_PACKSSWB = 3703, I_PACKUSDW = 7938, I_PACKUSWB = 3781, + I_PADDB = 7226, I_PADDD = 7256, I_PADDQ = 6503, I_PADDSB = 6952, I_PADDSW = 6969, + I_PADDUSB = 6642, I_PADDUSW = 6661, I_PADDW = 7241, I_PALIGNR = 9432, I_PAND = 6629, + I_PANDN = 6687, I_PAUSE = 10035, I_PAVGB = 6702, I_PAVGUSB = 2100, I_PAVGW = 6747, + I_PBLENDVB = 7621, I_PBLENDW = 9413, I_PCLMULQDQ = 9669, I_PCMPEQB = 4065, + I_PCMPEQD = 4103, I_PCMPEQQ = 7898, I_PCMPEQW = 4084, I_PCMPESTRI = 9748, + I_PCMPESTRM = 9725, I_PCMPGTB = 3724, I_PCMPGTD = 3762, I_PCMPGTQ = 8109, + I_PCMPGTW = 3743, I_PCMPISTRI = 9794, I_PCMPISTRM = 9771, I_PEXTRB = 9451, + I_PEXTRD = 9468, I_PEXTRQ = 9476, I_PEXTRW = 6333, I_PF2ID = 1936, I_PF2IW = 1929, + I_PFACC = 2050, I_PFADD = 1999, I_PFCMPEQ = 2057, I_PFCMPGE = 1960, I_PFCMPGT = 2006, + I_PFMAX = 2015, I_PFMIN = 1969, I_PFMUL = 2066, I_PFNACC = 1943, I_PFPNACC = 1951, + I_PFRCP = 1976, I_PFRCPIT1 = 2022, I_PFRCPIT2 = 2073, I_PFRSQIT1 = 2032, I_PFRSQRT = 1983, + I_PFSUB = 1992, I_PFSUBR = 2042, I_PHADDD = 7397, I_PHADDSW = 7414, I_PHADDW = 7380, + I_PHMINPOSUW = 8281, I_PHSUBD = 7473, I_PHSUBSW = 7490, I_PHSUBW = 7456, I_PI2FD = 1922, + I_PI2FW = 1915, I_PINSRB = 9552, I_PINSRD = 9590, I_PINSRQ = 9598, I_PINSRW = 6316, + I_PMADDUBSW = 7433, I_PMADDWD = 7095, I_PMAXSB 
= 8196, I_PMAXSD = 8213, I_PMAXSW = 6986, + I_PMAXUB = 6670, I_PMAXUD = 8247, I_PMAXUW = 8230, I_PMINSB = 8128, I_PMINSD = 8145, + I_PMINSW = 6924, I_PMINUB = 6612, I_PMINUD = 8179, I_PMINUW = 8162, I_PMOVMSKB = 6553, + I_PMOVSXBD = 7776, I_PMOVSXBQ = 7797, I_PMOVSXBW = 7755, I_PMOVSXDQ = 7860, + I_PMOVSXWD = 7818, I_PMOVSXWQ = 7839, I_PMOVZXBD = 8004, I_PMOVZXBQ = 8025, + I_PMOVZXBW = 7983, I_PMOVZXDQ = 8088, I_PMOVZXWD = 8046, I_PMOVZXWQ = 8067, + I_PMULDQ = 7881, I_PMULHRSW = 7560, I_PMULHRW = 2083, I_PMULHUW = 6762, I_PMULHW = 6781, + I_PMULLD = 8264, I_PMULLW = 6518, I_PMULUDQ = 7076, I_POP = 22, I_POPA = 98, + I_POPCNT = 4360, I_POPF = 277, I_POR = 6941, I_PREFETCH = 1894, I_PREFETCHNTA = 2424, + I_PREFETCHT0 = 2437, I_PREFETCHT1 = 2449, I_PREFETCHT2 = 2461, I_PREFETCHW = 1904, + I_PSADBW = 7114, I_PSHUFB = 7363, I_PSHUFD = 4010, I_PSHUFHW = 4018, I_PSHUFLW = 4027, + I_PSHUFW = 4002, I_PSIGNB = 7509, I_PSIGND = 7543, I_PSIGNW = 7526, I_PSLLD = 7046, + I_PSLLDQ = 9869, I_PSLLQ = 7061, I_PSLLW = 7031, I_PSRAD = 6732, I_PSRAW = 6717, + I_PSRLD = 6473, I_PSRLDQ = 9852, I_PSRLQ = 6488, I_PSRLW = 6458, I_PSUBB = 7166, + I_PSUBD = 7196, I_PSUBQ = 7211, I_PSUBSB = 6890, I_PSUBSW = 6907, I_PSUBUSB = 6574, + I_PSUBUSW = 6593, I_PSUBW = 7181, I_PSWAPD = 2092, I_PTEST = 7651, I_PUNPCKHBW = 3802, + I_PUNPCKHDQ = 3848, I_PUNPCKHQDQ = 3917, I_PUNPCKHWD = 3825, I_PUNPCKLBW = 3634, + I_PUNPCKLDQ = 3680, I_PUNPCKLQDQ = 3892, I_PUNPCKLWD = 3657, I_PUSH = 16, + I_PUSHA = 91, I_PUSHF = 270, I_PXOR = 7003, I_RCL = 977, I_RCPPS = 2975, I_RCPSS = 2982, + I_RCR = 982, I_RDFSBASE = 9904, I_RDGSBASE = 9934, I_RDMSR = 600, I_RDPMC = 607, + I_RDRAND = 10048, I_RDTSC = 593, I_RDTSCP = 1886, I_RET = 325, I_RETF = 354, + I_ROL = 967, I_ROR = 972, I_ROUNDPD = 9318, I_ROUNDPS = 9299, I_ROUNDSD = 9356, + I_ROUNDSS = 9337, I_RSM = 882, I_RSQRTPS = 2937, I_RSQRTSS = 2946, I_SAHF = 283, + I_SAL = 997, I_SALC = 394, I_SAR = 1002, I_SBB = 36, I_SCAS = 319, I_SETA = 807, + I_SETAE = 780, I_SETB = 
774, I_SETBE = 800, I_SETG = 859, I_SETGE = 845, I_SETL = 839, + I_SETLE = 852, I_SETNO = 767, I_SETNP = 832, I_SETNS = 819, I_SETNZ = 793, + I_SETO = 761, I_SETP = 826, I_SETS = 813, I_SETZ = 787, I_SFENCE = 4343, I_SGDT = 1691, + I_SHL = 987, I_SHLD = 876, I_SHR = 992, I_SHRD = 892, I_SHUFPD = 6358, I_SHUFPS = 6350, + I_SIDT = 1697, I_SKINIT = 1861, I_SLDT = 1657, I_SMSW = 1715, I_SQRTPD = 2877, + I_SQRTPS = 2869, I_SQRTSD = 2893, I_SQRTSS = 2885, I_STC = 497, I_STD = 517, + I_STGI = 1849, I_STI = 507, I_STMXCSR = 9973, I_STOS = 307, I_STR = 1663, I_SUB = 51, + I_SUBPD = 3401, I_SUBPS = 3394, I_SUBSD = 3415, I_SUBSS = 3408, I_SWAPGS = 1878, + I_SYSCALL = 532, I_SYSENTER = 614, I_SYSEXIT = 624, I_SYSRET = 547, I_TEST = 206, + I_TZCNT = 4373, I_UCOMISD = 2764, I_UCOMISS = 2755, I_UD2 = 569, I_UNPCKHPD = 2318, + I_UNPCKHPS = 2308, I_UNPCKLPD = 2276, I_UNPCKLPS = 2266, I_VADDPD = 3161, + I_VADDPS = 3153, I_VADDSD = 3177, I_VADDSS = 3169, I_VADDSUBPD = 6436, I_VADDSUBPS = 6447, + I_VAESDEC = 9239, I_VAESDECLAST = 9260, I_VAESENC = 9197, I_VAESENCLAST = 9218, + I_VAESIMC = 9180, I_VAESKEYGENASSIST = 9834, I_VANDNPD = 3060, I_VANDNPS = 3051, + I_VANDPD = 3027, I_VANDPS = 3019, I_VBLENDPD = 9403, I_VBLENDPS = 9384, I_VBLENDVPD = 9703, + I_VBLENDVPS = 9692, I_VBROADCASTF128 = 7694, I_VBROADCASTSD = 7680, I_VBROADCASTSS = 7666, + I_VCMPEQPD = 5110, I_VCMPEQPS = 4708, I_VCMPEQSD = 5914, I_VCMPEQSS = 5512, + I_VCMPEQ_OSPD = 5291, I_VCMPEQ_OSPS = 4889, I_VCMPEQ_OSSD = 6095, I_VCMPEQ_OSSS = 5693, + I_VCMPEQ_UQPD = 5197, I_VCMPEQ_UQPS = 4795, I_VCMPEQ_UQSD = 6001, I_VCMPEQ_UQSS = 5599, + I_VCMPEQ_USPD = 5400, I_VCMPEQ_USPS = 4998, I_VCMPEQ_USSD = 6204, I_VCMPEQ_USSS = 5802, + I_VCMPFALSEPD = 5232, I_VCMPFALSEPS = 4830, I_VCMPFALSESD = 6036, I_VCMPFALSESS = 5634, + I_VCMPFALSE_OSPD = 5441, I_VCMPFALSE_OSPS = 5039, I_VCMPFALSE_OSSD = 6245, + I_VCMPFALSE_OSSS = 5843, I_VCMPGEPD = 5259, I_VCMPGEPS = 4857, I_VCMPGESD = 6063, + I_VCMPGESS = 5661, I_VCMPGE_OQPD = 5471, I_VCMPGE_OQPS = 
5069, I_VCMPGE_OQSD = 6275, + I_VCMPGE_OQSS = 5873, I_VCMPGTPD = 5269, I_VCMPGTPS = 4867, I_VCMPGTSD = 6073, + I_VCMPGTSS = 5671, I_VCMPGT_OQPD = 5484, I_VCMPGT_OQPS = 5082, I_VCMPGT_OQSD = 6288, + I_VCMPGT_OQSS = 5886, I_VCMPLEPD = 5130, I_VCMPLEPS = 4728, I_VCMPLESD = 5934, + I_VCMPLESS = 5532, I_VCMPLE_OQPD = 5317, I_VCMPLE_OQPS = 4915, I_VCMPLE_OQSD = 6121, + I_VCMPLE_OQSS = 5719, I_VCMPLTPD = 5120, I_VCMPLTPS = 4718, I_VCMPLTSD = 5924, + I_VCMPLTSS = 5522, I_VCMPLT_OQPD = 5304, I_VCMPLT_OQPS = 4902, I_VCMPLT_OQSD = 6108, + I_VCMPLT_OQSS = 5706, I_VCMPNEQPD = 5153, I_VCMPNEQPS = 4751, I_VCMPNEQSD = 5957, + I_VCMPNEQSS = 5555, I_VCMPNEQ_OQPD = 5245, I_VCMPNEQ_OQPS = 4843, I_VCMPNEQ_OQSD = 6049, + I_VCMPNEQ_OQSS = 5647, I_VCMPNEQ_OSPD = 5457, I_VCMPNEQ_OSPS = 5055, I_VCMPNEQ_OSSD = 6261, + I_VCMPNEQ_OSSS = 5859, I_VCMPNEQ_USPD = 5345, I_VCMPNEQ_USPS = 4943, I_VCMPNEQ_USSD = 6149, + I_VCMPNEQ_USSS = 5747, I_VCMPNGEPD = 5210, I_VCMPNGEPS = 4808, I_VCMPNGESD = 6014, + I_VCMPNGESS = 5612, I_VCMPNGE_UQPD = 5413, I_VCMPNGE_UQPS = 5011, I_VCMPNGE_UQSD = 6217, + I_VCMPNGE_UQSS = 5815, I_VCMPNGTPD = 5221, I_VCMPNGTPS = 4819, I_VCMPNGTSD = 6025, + I_VCMPNGTSS = 5623, I_VCMPNGT_UQPD = 5427, I_VCMPNGT_UQPS = 5025, I_VCMPNGT_UQSD = 6231, + I_VCMPNGT_UQSS = 5829, I_VCMPNLEPD = 5175, I_VCMPNLEPS = 4773, I_VCMPNLESD = 5979, + I_VCMPNLESS = 5577, I_VCMPNLE_UQPD = 5373, I_VCMPNLE_UQPS = 4971, I_VCMPNLE_UQSD = 6177, + I_VCMPNLE_UQSS = 5775, I_VCMPNLTPD = 5164, I_VCMPNLTPS = 4762, I_VCMPNLTSD = 5968, + I_VCMPNLTSS = 5566, I_VCMPNLT_UQPD = 5359, I_VCMPNLT_UQPS = 4957, I_VCMPNLT_UQSD = 6163, + I_VCMPNLT_UQSS = 5761, I_VCMPORDPD = 5186, I_VCMPORDPS = 4784, I_VCMPORDSD = 5990, + I_VCMPORDSS = 5588, I_VCMPORD_SPD = 5387, I_VCMPORD_SPS = 4985, I_VCMPORD_SSD = 6191, + I_VCMPORD_SSS = 5789, I_VCMPTRUEPD = 5279, I_VCMPTRUEPS = 4877, I_VCMPTRUESD = 6083, + I_VCMPTRUESS = 5681, I_VCMPTRUE_USPD = 5497, I_VCMPTRUE_USPS = 5095, I_VCMPTRUE_USSD = 6301, + I_VCMPTRUE_USSS = 5899, I_VCMPUNORDPD = 
5140, I_VCMPUNORDPS = 4738, I_VCMPUNORDSD = 5944, + I_VCMPUNORDSS = 5542, I_VCMPUNORD_SPD = 5330, I_VCMPUNORD_SPS = 4928, I_VCMPUNORD_SSD = 6134, + I_VCMPUNORD_SSS = 5732, I_VCOMISD = 2818, I_VCOMISS = 2809, I_VCVTDQ2PD = 6841, + I_VCVTDQ2PS = 3360, I_VCVTPD2DQ = 6852, I_VCVTPD2PS = 3296, I_VCVTPS2DQ = 3371, + I_VCVTPS2PD = 3285, I_VCVTSD2SI = 2744, I_VCVTSD2SS = 3318, I_VCVTSI2SD = 2558, + I_VCVTSI2SS = 2547, I_VCVTSS2SD = 3307, I_VCVTSS2SI = 2733, I_VCVTTPD2DQ = 6829, + I_VCVTTPS2DQ = 3382, I_VCVTTSD2SI = 2681, I_VCVTTSS2SI = 2669, I_VDIVPD = 3550, + I_VDIVPS = 3542, I_VDIVSD = 3566, I_VDIVSS = 3558, I_VDPPD = 9643, I_VDPPS = 9630, + I_VERR = 1679, I_VERW = 1685, I_VEXTRACTF128 = 9538, I_VEXTRACTPS = 9513, + I_VFMADD132PD = 8409, I_VFMADD132PS = 8396, I_VFMADD132SD = 8435, I_VFMADD132SS = 8422, + I_VFMADD213PD = 8689, I_VFMADD213PS = 8676, I_VFMADD213SD = 8715, I_VFMADD213SS = 8702, + I_VFMADD231PD = 8969, I_VFMADD231PS = 8956, I_VFMADD231SD = 8995, I_VFMADD231SS = 8982, + I_VFMADDSUB132PD = 8348, I_VFMADDSUB132PS = 8332, I_VFMADDSUB213PD = 8628, + I_VFMADDSUB213PS = 8612, I_VFMADDSUB231PD = 8908, I_VFMADDSUB231PS = 8892, + I_VFMSUB132PD = 8461, I_VFMSUB132PS = 8448, I_VFMSUB132SD = 8487, I_VFMSUB132SS = 8474, + I_VFMSUB213PD = 8741, I_VFMSUB213PS = 8728, I_VFMSUB213SD = 8767, I_VFMSUB213SS = 8754, + I_VFMSUB231PD = 9021, I_VFMSUB231PS = 9008, I_VFMSUB231SD = 9047, I_VFMSUB231SS = 9034, + I_VFMSUBADD132PD = 8380, I_VFMSUBADD132PS = 8364, I_VFMSUBADD213PD = 8660, + I_VFMSUBADD213PS = 8644, I_VFMSUBADD231PD = 8940, I_VFMSUBADD231PS = 8924, + I_VFNMADD132PD = 8514, I_VFNMADD132PS = 8500, I_VFNMADD132SD = 8542, I_VFNMADD132SS = 8528, + I_VFNMADD213PD = 8794, I_VFNMADD213PS = 8780, I_VFNMADD213SD = 8822, I_VFNMADD213SS = 8808, + I_VFNMADD231PD = 9074, I_VFNMADD231PS = 9060, I_VFNMADD231SD = 9102, I_VFNMADD231SS = 9088, + I_VFNMSUB132PD = 8570, I_VFNMSUB132PS = 8556, I_VFNMSUB132SD = 8598, I_VFNMSUB132SS = 8584, + I_VFNMSUB213PD = 8850, I_VFNMSUB213PS = 8836, 
I_VFNMSUB213SD = 8878, I_VFNMSUB213SS = 8864, + I_VFNMSUB231PD = 9130, I_VFNMSUB231PS = 9116, I_VFNMSUB231SD = 9158, I_VFNMSUB231SS = 9144, + I_VHADDPD = 4219, I_VHADDPS = 4228, I_VHSUBPD = 4253, I_VHSUBPS = 4262, I_VINSERTF128 = 9525, + I_VINSERTPS = 9579, I_VLDDQU = 7023, I_VLDMXCSR = 9963, I_VMASKMOVDQU = 7153, + I_VMASKMOVPD = 7971, I_VMASKMOVPS = 7959, I_VMAXPD = 3610, I_VMAXPS = 3602, + I_VMAXSD = 3626, I_VMAXSS = 3618, I_VMCALL = 1735, I_VMCLEAR = 10011, I_VMFUNC = 1803, + I_VMINPD = 3490, I_VMINPS = 3482, I_VMINSD = 3506, I_VMINSS = 3498, I_VMLAUNCH = 1743, + I_VMLOAD = 1833, I_VMMCALL = 1824, I_VMOVAPD = 2498, I_VMOVAPS = 2489, I_VMOVD = 3954, + I_VMOVDDUP = 2256, I_VMOVDQA = 3984, I_VMOVDQU = 3993, I_VMOVHLPS = 2217, + I_VMOVHPD = 2404, I_VMOVHPS = 2395, I_VMOVLHPS = 2385, I_VMOVLPD = 2236, I_VMOVLPS = 2227, + I_VMOVMSKPD = 2858, I_VMOVMSKPS = 2847, I_VMOVNTDQ = 6880, I_VMOVNTDQA = 7927, + I_VMOVNTPD = 2615, I_VMOVNTPS = 2605, I_VMOVQ = 3961, I_VMOVSD = 2165, I_VMOVSHDUP = 2413, + I_VMOVSLDUP = 2245, I_VMOVSS = 2157, I_VMOVUPD = 2148, I_VMOVUPS = 2139, I_VMPSADBW = 9659, + I_VMPTRLD = 10002, I_VMPTRST = 6407, I_VMREAD = 4150, I_VMRESUME = 1753, I_VMRUN = 1817, + I_VMSAVE = 1841, I_VMULPD = 3221, I_VMULPS = 3213, I_VMULSD = 3237, I_VMULSS = 3229, + I_VMWRITE = 4174, I_VMXOFF = 1763, I_VMXON = 10020, I_VORPD = 3088, I_VORPS = 3081, + I_VPABSB = 7717, I_VPABSD = 7747, I_VPABSW = 7732, I_VPACKSSDW = 3881, I_VPACKSSWB = 3713, + I_VPACKUSDW = 7948, I_VPACKUSWB = 3791, I_VPADDB = 7233, I_VPADDD = 7263, + I_VPADDQ = 6510, I_VPADDSB = 6960, I_VPADDSW = 6977, I_VPADDUSW = 6651, I_VPADDW = 7248, + I_VPALIGNR = 9441, I_VPAND = 6635, I_VPANDN = 6694, I_VPAVGB = 6709, I_VPAVGW = 6754, + I_VPBLENDVB = 9714, I_VPBLENDW = 9422, I_VPCLMULQDQ = 9680, I_VPCMPEQB = 4074, + I_VPCMPEQD = 4112, I_VPCMPEQQ = 7907, I_VPCMPEQW = 4093, I_VPCMPESTRI = 9759, + I_VPCMPESTRM = 9736, I_VPCMPGTB = 3733, I_VPCMPGTD = 3771, I_VPCMPGTQ = 8118, + I_VPCMPGTW = 3752, I_VPCMPISTRI = 9805, 
I_VPCMPISTRM = 9782, I_VPERM2F128 = 9287, + I_VPERMILPD = 7592, I_VPERMILPS = 7581, I_VPEXTRB = 9459, I_VPEXTRD = 9484, + I_VPEXTRQ = 9493, I_VPEXTRW = 6341, I_VPHADDD = 7405, I_VPHADDSW = 7423, I_VPHADDW = 7388, + I_VPHMINPOSUW = 8293, I_VPHSUBD = 7481, I_VPHSUBSW = 7499, I_VPHSUBW = 7464, + I_VPINSRB = 9560, I_VPINSRD = 9606, I_VPINSRQ = 9615, I_VPINSRW = 6324, I_VPMADDUBSW = 7444, + I_VPMADDWD = 7104, I_VPMAXSB = 8204, I_VPMAXSD = 8221, I_VPMAXSW = 6994, I_VPMAXUB = 6678, + I_VPMAXUD = 8255, I_VPMAXUW = 8238, I_VPMINSB = 8136, I_VPMINSD = 8153, I_VPMINSW = 6932, + I_VPMINUB = 6620, I_VPMINUD = 8187, I_VPMINUW = 8170, I_VPMOVMSKB = 6563, + I_VPMOVSXBD = 7786, I_VPMOVSXBQ = 7807, I_VPMOVSXBW = 7765, I_VPMOVSXDQ = 7870, + I_VPMOVSXWD = 7828, I_VPMOVSXWQ = 7849, I_VPMOVZXBD = 8014, I_VPMOVZXBQ = 8035, + I_VPMOVZXBW = 7993, I_VPMOVZXDQ = 8098, I_VPMOVZXWD = 8056, I_VPMOVZXWQ = 8077, + I_VPMULDQ = 7889, I_VPMULHRSW = 7570, I_VPMULHUW = 6771, I_VPMULHW = 6789, + I_VPMULLD = 8272, I_VPMULLW = 6526, I_VPMULUDQ = 7085, I_VPOR = 6946, I_VPSADBW = 7122, + I_VPSHUFB = 7371, I_VPSHUFD = 4036, I_VPSHUFHW = 4045, I_VPSHUFLW = 4055, + I_VPSIGNB = 7517, I_VPSIGND = 7551, I_VPSIGNW = 7534, I_VPSLLD = 7053, I_VPSLLDQ = 9877, + I_VPSLLQ = 7068, I_VPSLLW = 7038, I_VPSRAD = 6739, I_VPSRAW = 6724, I_VPSRLD = 6480, + I_VPSRLDQ = 9860, I_VPSRLQ = 6495, I_VPSRLW = 6465, I_VPSUBB = 7173, I_VPSUBD = 7203, + I_VPSUBQ = 7218, I_VPSUBSB = 6898, I_VPSUBSW = 6915, I_VPSUBUSB = 6583, I_VPSUBUSW = 6602, + I_VPSUBW = 7188, I_VPTEST = 7658, I_VPUNPCKHBW = 3813, I_VPUNPCKHDQ = 3859, + I_VPUNPCKHQDQ = 3929, I_VPUNPCKHWD = 3836, I_VPUNPCKLBW = 3645, I_VPUNPCKLDQ = 3691, + I_VPUNPCKLQDQ = 3904, I_VPUNPCKLWD = 3668, I_VPXOR = 7009, I_VRCPPS = 2989, + I_VRCPSS = 2997, I_VROUNDPD = 9327, I_VROUNDPS = 9308, I_VROUNDSD = 9365, + I_VROUNDSS = 9346, I_VRSQRTPS = 2955, I_VRSQRTSS = 2965, I_VSHUFPD = 6375, + I_VSHUFPS = 6366, I_VSQRTPD = 2910, I_VSQRTPS = 2901, I_VSQRTSD = 2928, I_VSQRTSS = 2919, + I_VSTMXCSR = 
9992, I_VSUBPD = 3430, I_VSUBPS = 3422, I_VSUBSD = 3446, I_VSUBSS = 3438, + I_VTESTPD = 7612, I_VTESTPS = 7603, I_VUCOMISD = 2783, I_VUCOMISS = 2773, + I_VUNPCKHPD = 2339, I_VUNPCKHPS = 2328, I_VUNPCKLPD = 2297, I_VUNPCKLPS = 2286, + I_VXORPD = 3117, I_VXORPS = 3109, I_VZEROALL = 4140, I_VZEROUPPER = 4128, + I_WAIT = 10042, I_WBINVD = 561, I_WRFSBASE = 9953, I_WRGSBASE = 9982, I_WRMSR = 586, + I_XABORT = 1007, I_XADD = 946, I_XBEGIN = 1015, I_XCHG = 212, I_XEND = 1811, + I_XGETBV = 1787, I_XLAT = 400, I_XOR = 61, I_XORPD = 3102, I_XORPS = 3095, + I_XRSTOR = 4295, I_XRSTOR64 = 4303, I_XSAVE = 4271, I_XSAVE64 = 4278, I_XSAVEOPT = 4321, + I_XSAVEOPT64 = 4331, I_XSETBV = 1795, I__3DNOW = 10056 +} _InstructionType; + +typedef enum { + R_RAX, R_RCX, R_RDX, R_RBX, R_RSP, R_RBP, R_RSI, R_RDI, R_R8, R_R9, R_R10, R_R11, R_R12, R_R13, R_R14, R_R15, + R_EAX, R_ECX, R_EDX, R_EBX, R_ESP, R_EBP, R_ESI, R_EDI, R_R8D, R_R9D, R_R10D, R_R11D, R_R12D, R_R13D, R_R14D, R_R15D, + R_AX, R_CX, R_DX, R_BX, R_SP, R_BP, R_SI, R_DI, R_R8W, R_R9W, R_R10W, R_R11W, R_R12W, R_R13W, R_R14W, R_R15W, + R_AL, R_CL, R_DL, R_BL, R_AH, R_CH, R_DH, R_BH, R_R8B, R_R9B, R_R10B, R_R11B, R_R12B, R_R13B, R_R14B, R_R15B, + R_SPL, R_BPL, R_SIL, R_DIL, + R_ES, R_CS, R_SS, R_DS, R_FS, R_GS, + R_RIP, + R_ST0, R_ST1, R_ST2, R_ST3, R_ST4, R_ST5, R_ST6, R_ST7, + R_MM0, R_MM1, R_MM2, R_MM3, R_MM4, R_MM5, R_MM6, R_MM7, + R_XMM0, R_XMM1, R_XMM2, R_XMM3, R_XMM4, R_XMM5, R_XMM6, R_XMM7, R_XMM8, R_XMM9, R_XMM10, R_XMM11, R_XMM12, R_XMM13, R_XMM14, R_XMM15, + R_YMM0, R_YMM1, R_YMM2, R_YMM3, R_YMM4, R_YMM5, R_YMM6, R_YMM7, R_YMM8, R_YMM9, R_YMM10, R_YMM11, R_YMM12, R_YMM13, R_YMM14, R_YMM15, + R_CR0, R_UNUSED0, R_CR2, R_CR3, R_CR4, R_UNUSED1, R_UNUSED2, R_UNUSED3, R_CR8, + R_DR0, R_DR1, R_DR2, R_DR3, R_UNUSED4, R_UNUSED5, R_DR6, R_DR7 +} _RegisterType; + +#endif /* MNEMONICS_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/operands.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/operands.c new file mode 
100644 index 00000000..8712e1bd --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/operands.c @@ -0,0 +1,1290 @@
/*
operands.c

diStorm3 - Powerful disassembler for X86/AMD64
http://ragestorm.net/distorm/
distorm at gmail dot com
Copyright (C) 2003-2018 Gil Dabah
This library is licensed under the BSD license. See the file COPYING.
*/


#include "config.h"
#include "operands.h"
#include "x86defs.h"
#include "insts.h"
#include "mnemonics.h"


/* Maps a register to its register-class mask. */
/* NOTE(review): the entries below must stay in exact 1:1 positional
 * correspondence with the _RegisterType enumeration (GPRs in four sizes,
 * the REX low-byte registers, segment registers, RIP, then FPU/MMX/SSE/AVX,
 * CR and DR registers). A 0 entry means the register has no class mask. */
uint32_t _REGISTERTORCLASS[] = /* Based on _RegisterType enumeration! */
{RM_AX, RM_CX, RM_DX, RM_BX, RM_SP, RM_BP, RM_SI, RM_DI, RM_R8, RM_R9, RM_R10, RM_R11, RM_R12, RM_R13, RM_R14, RM_R15,
 RM_AX, RM_CX, RM_DX, RM_BX, RM_SP, RM_BP, RM_SI, RM_DI, RM_R8, RM_R9, RM_R10, RM_R11, RM_R12, RM_R13, RM_R14, RM_R15,
 RM_AX, RM_CX, RM_DX, RM_BX, RM_SP, RM_BP, RM_SI, RM_DI, RM_R8, RM_R9, RM_R10, RM_R11, RM_R12, RM_R13, RM_R14, RM_R15,
 /* 8-bit registers: AH/CH/DH/BH share the class of AL/CL/DL/BL. */
 RM_AX, RM_CX, RM_DX, RM_BX, RM_AX, RM_CX, RM_DX, RM_BX, RM_R8, RM_R9, RM_R10, RM_R11, RM_R12, RM_R13, RM_R14, RM_R15,
 RM_SP, RM_BP, RM_SI, RM_DI,
 /* Segment registers and RIP carry no register-class mask. */
 0, 0, 0, 0, 0, 0,
 0,
 RM_FPU, RM_FPU, RM_FPU, RM_FPU, RM_FPU, RM_FPU, RM_FPU, RM_FPU,
 RM_MMX, RM_MMX, RM_MMX, RM_MMX, RM_MMX, RM_MMX, RM_MMX, RM_MMX,
 RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE, RM_SSE,
 RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX, RM_AVX,
 /* Unimplemented control registers (CR1, CR5-CR7) get no class. */
 RM_CR, 0, RM_CR, RM_CR, RM_CR, 0, 0, 0, RM_CR,
 RM_DR, RM_DR, RM_DR, RM_DR, 0, 0, RM_DR, RM_DR
};

/* Size classes for memory-indirection operands; _OPSIZETOINT converts a
 * class to its size in bits (index i of the enum maps to _OPSIZETOINT[i]). */
typedef enum {OPERAND_SIZE_NONE = 0, OPERAND_SIZE8, OPERAND_SIZE16, OPERAND_SIZE32, OPERAND_SIZE64, OPERAND_SIZE80, OPERAND_SIZE128, OPERAND_SIZE256} _OperandSizeType;
static uint16_t _OPSIZETOINT[] = {0, 8, 16, 32, 64, 80, 128, 256};

/* A helper function to fix the 8 bits register if REX is used (to support SIL, DIL, etc).
 */
static unsigned int _FASTCALL_ operands_fix_8bit_rex_base(unsigned int reg)
{
	/* With any REX prefix present, encodings 4-7 select SPL/BPL/SIL/DIL
	 * instead of AH/CH/DH/BH, so remap them into the REX 8-bit bank. */
	if ((reg >= 4) && (reg < 8)) return reg + REGS8_REX_BASE - 4;
	return reg + REGS8_BASE;
}

/* A helper function to set operand's type and size. */
static void _FASTCALL_ operands_set_ts(_Operand* op, _OperandType type, uint16_t size)
{
	op->type = type;
	op->size = size;
}

/* A helper function to set operand's type, size and index. */
static void _FASTCALL_ operands_set_tsi(_Operand* op, _OperandType type, uint16_t size, unsigned int index)
{
	op->type = type;
	op->index = (uint8_t)index;
	op->size = size;
}

/* A helper function to read an unsigned integer from the stream safely.
 * Decrements ci->codeLen first and bails out with FALSE if fewer than
 * 'size' bytes remain; on success advances ci->code past the value.
 * Only sizes 1/2/4/8 are supported (other sizes leave *result untouched). */
static int _FASTCALL_ read_stream_safe_uint(_CodeInfo* ci, void* result, unsigned int size)
{
	ci->codeLen -= size;
	if (ci->codeLen < 0) return FALSE;
	switch (size)
	{
		case 1: *(uint8_t*)result = *(uint8_t*)ci->code; break;
		case 2: *(uint16_t*)result = RUSHORT(ci->code); break;
		case 4: *(uint32_t*)result = RULONG(ci->code); break;
		case 8: *(uint64_t*)result = RULLONG(ci->code); break;
	}
	ci->code += size;
	return TRUE;
}

/* A helper function to read a signed integer from the stream safely.
 * Same contract as read_stream_safe_uint, but the value is sign-extended
 * into the full int64_t result. */
static int _FASTCALL_ read_stream_safe_sint(_CodeInfo* ci, int64_t* result, unsigned int size)
{
	ci->codeLen -= size;
	if (ci->codeLen < 0) return FALSE;
	switch (size)
	{
		case 1: *result = *(int8_t*)ci->code; break;
		case 2: *result = RSHORT(ci->code); break;
		case 4: *result = RLONG(ci->code); break;
		case 8: *result = RLLONG(ci->code); break;
	}
	ci->code += size;
	return TRUE;
}

/*
 * SIB decoding is the most confusing part when decoding IA-32 instructions.
 * This explanation should clear up some stuff.
 *
 * ! When base == 5, use EBP as the base register !
 * if (rm == 4) {
 *  if mod == 01, decode SIB byte and ALSO read a 8 bits displacement.
 *  if mod == 10, decode SIB byte and ALSO read a 32 bits displacement.
 *  if mod == 11 <-- EXCEPTION, this is a general-purpose register and mustn't lead to SIB decoding!
 *  ; So far so good, now the confusing part comes in with mod == 0 and base=5, but no worry.
 *  if (mod == 00) {
 *   decode SIB byte WITHOUT any displacement.
 *   EXCEPTION!!! when base == 5, read a 32 bits displacement, but this time DO NOT use (EBP) BASE at all!
 *  }
 *
 * NOTE: base could specify None (no base register) if base==5 and mod==0, but then you also need DISP32.
 * }
 */
static void operands_extract_sib(_DInst* di, _OperandNumberType opNum,
                                 _PrefixState* ps, _DecodeType effAdrSz,
                                 unsigned int sib, unsigned int mod)
{
	unsigned int scale = 0, index = 0, base = 0;
	unsigned int vrex = ps->vrex;
	uint8_t* pIndex = NULL;

	_Operand* op = &di->ops[opNum];

	/*
	 * SIB bits:
	 * |7---6-5----3-2---0|
	 * |SCALE| INDEX| BASE|
	 * |------------------|
	 */
	scale = (sib >> 6) & 3;
	index = (sib >> 3) & 7;
	base = sib & 7;

	/*
	 * The following fields: base/index/scale/disp8/32 are ALL optional by specific rules!
	 * The idea here is to keep the indirection as a simple-memory type.
	 * Because the base is optional, and we might be left with only one index.
	 * So even if there's a base but no index, or vice versa, we end up with one index register.
	 */

	/* In 64 bits the REX prefix might affect the index of the SIB byte. */
	if (vrex & PREFIX_EX_X) {
		ps->usedPrefixes |= INST_PRE_REX;
		index += EX_GPR_BASE;
	}

	if (index == 4) { /* No index is used. Use SMEM. */
		op->type = O_SMEM;
		pIndex = &op->index;
	} else {
		op->type = O_MEM;
		pIndex = &di->base;
		/* No base, unless it is updated below. E.G: [EAX*4] has no base reg. */
	}

	if (base != 5) {
		if (vrex & PREFIX_EX_B) ps->usedPrefixes |= INST_PRE_REX;
		*pIndex = effAdrSz == Decode64Bits ? REGS64_BASE : REGS32_BASE;
		*pIndex += (uint8_t)(base + ((vrex & PREFIX_EX_B) ? EX_GPR_BASE : 0));
	} else if (mod != 0) {
		/*
		 * if base == 5 then you have to decode according to MOD.
		 * mod(00) - disp32.
		 * mod(01) - disp8 + rBP
		 * mod(10) - disp32 + rBP
		 * mod(11) - not possible, it's a general-purpose register.
		 */
		if (vrex & PREFIX_EX_B) ps->usedPrefixes |= INST_PRE_REX;
		if (effAdrSz == Decode64Bits) *pIndex = REGS64_BASE + 5 + ((vrex & PREFIX_EX_B) ? EX_GPR_BASE : 0);
		else *pIndex = REGS32_BASE + 5 + ((vrex & PREFIX_EX_B) ? EX_GPR_BASE : 0);
	} else if (index == 4) {
		/* 32bits displacement only. */
		op->type = O_DISP;
		return;
	}

	if (index != 4) { /* In 64 bits decoding mode, if index == R12, it's valid! */
		if (effAdrSz == Decode64Bits) op->index = (uint8_t)(REGS64_BASE + index);
		else op->index = (uint8_t)(REGS32_BASE + index);
		di->scale = scale != 0 ? (1 << scale) : 0;
	}
}

/*
 * This seems to be the hardest part in decoding the operands.
 * If you take a look carefully at Table 2-2. 32-Bit Addressing Forms with the ModR/M Byte,
 * you will understand it's easy to decode the operands.

 * First we check the DT, so we can decide according to which Table in the documentation we are supposed to decode.
 * Then we follow the specific table whether it's 16 bits or 32/64 bits.

 * Don't forget that Operand Size AND Address Size prefixes may change the decoding!

 * Some instructions force the use of RM16 or other specific types, so take it into account.
 */
static int operands_extract_modrm(_CodeInfo* ci,
                                  _DInst* di, _OpType type,
                                  _OperandNumberType opNum, _PrefixState* ps,
                                  _DecodeType effOpSz, _DecodeType effAdrSz,
                                  int* lockableInstruction, unsigned int mod, unsigned int rm,
                                  _iflags instFlags)
{
	unsigned int vrex = ps->vrex, sib = 0, base = 0;
	_Operand* op = &di->ops[opNum];
	uint16_t size = 0;

	if (mod == 3) {
		/*
		 * General-purpose register is handled the same way in 16/32/64 bits decoding modes.
		 * NOTE!! that we have to override the size of the register, since it was set earlier as Memory and not Register!
		 */
		op->type = O_REG;
		/* Start with original size which was set earlier, some registers have same size of memory and depend on it. */
		size = op->size;
		switch(type)
		{
			case OT_RFULL_M16:
			case OT_RM_FULL:
				switch (effOpSz)
				{
					case Decode16Bits:
						ps->usedPrefixes |= INST_PRE_OP_SIZE;
						if (vrex & PREFIX_EX_B) {
							ps->usedPrefixes |= INST_PRE_REX;
							rm += EX_GPR_BASE;
						}
						size = 16;
						rm += REGS16_BASE;
						break;
					case Decode32Bits:
						ps->usedPrefixes |= INST_PRE_OP_SIZE;
						if (vrex & PREFIX_EX_B) {
							ps->usedPrefixes |= INST_PRE_REX;
							rm += EX_GPR_BASE;
						}
						size = 32;
						rm += REGS32_BASE;
						break;
					case Decode64Bits:
						/* A fix for SMSW RAX which use the REX prefix. */
						if (type == OT_RFULL_M16) ps->usedPrefixes |= INST_PRE_REX;
						/* CALL NEAR/PUSH/POP defaults to 64 bits. --> INST_64BITS, REX isn't required, thus ignored anyways. */
						if (instFlags & INST_PRE_REX) ps->usedPrefixes |= INST_PRE_REX;
						/* Include REX if used for REX.B. */
						if (vrex & PREFIX_EX_B) {
							ps->usedPrefixes |= INST_PRE_REX;
							rm += EX_GPR_BASE;
						}
						size = 64;
						rm += REGS64_BASE;
						break;
				}
				break;
			case OT_R32_64_M8:
				/* FALL THROUGH, decode 32 or 64 bits register. */
			case OT_R32_64_M16:
				/* FALL THROUGH, decode 32 or 64 bits register. */
			case OT_RM32_64: /* Take care specifically in MOVNTI/MOVD/CVT's instructions, making it _REG64 with REX or if they are promoted. */
				if (vrex & PREFIX_EX_B) {
					ps->usedPrefixes |= INST_PRE_REX;
					rm += EX_GPR_BASE;
				}
				/* Is it a promoted instruction? (only INST_64BITS is set and REX isn't required.) */
				if ((ci->dt == Decode64Bits) && ((instFlags & (INST_64BITS | INST_PRE_REX)) == INST_64BITS)) {
					size = 64;
					rm += REGS64_BASE;
					break;
				}
				/* Give a chance to REX.W. Because if it was a promoted instruction we don't care about REX.W anyways. */
				if (vrex & PREFIX_EX_W) {
					ps->usedPrefixes |= INST_PRE_REX;
					size = 64;
					rm += REGS64_BASE;
				} else {
					size = 32;
					rm += REGS32_BASE;
				}
				break;
			case OT_RM16_32: /* Used only with MOVZXD instruction to support 16 bits operand. */
				if (vrex & PREFIX_EX_B) {
					ps->usedPrefixes |= INST_PRE_REX;
					rm += EX_GPR_BASE;
				}
				/* Is it 16 bits operand size? */
				if (ps->decodedPrefixes & INST_PRE_OP_SIZE) {
					ps->usedPrefixes |= INST_PRE_OP_SIZE;
					size = 16;
					rm += REGS16_BASE;
				} else {
					size = 32;
					rm += REGS32_BASE;
				}
				break;
			case OT_RM16:
				if (vrex & PREFIX_EX_B) {
					ps->usedPrefixes |= INST_PRE_REX;
					rm += EX_GPR_BASE;
				}
				rm += REGS16_BASE;
				break;
			case OT_RM8:
				if (ps->prefixExtType == PET_REX) {
					ps->usedPrefixes |= INST_PRE_REX;
					rm = operands_fix_8bit_rex_base(rm + ((vrex & PREFIX_EX_B) ? EX_GPR_BASE : 0));
				} else rm += REGS8_BASE;
				break;
			case OT_MM32:
			case OT_MM64:
				/* MMX doesn't support extended registers. */
				size = 64;
				rm += MMXREGS_BASE;
				break;

			case OT_XMM16:
			case OT_XMM32:
			case OT_XMM64:
			case OT_XMM128:
				if (vrex & PREFIX_EX_B) {
					ps->usedPrefixes |= INST_PRE_REX;
					rm += EX_GPR_BASE;
				}
				size = 128;
				rm += SSEREGS_BASE;
				break;

			case OT_RM32:
			case OT_R32_M8:
			case OT_R32_M16:
				if (vrex & PREFIX_EX_B) {
					ps->usedPrefixes |= INST_PRE_REX;
					rm += EX_GPR_BASE;
				}
				size = 32;
				rm += REGS32_BASE;
				break;

			case OT_YMM256:
				if (vrex & PREFIX_EX_B) rm += EX_GPR_BASE;
				rm += AVXREGS_BASE;
				break;
			case OT_YXMM64_256:
			case OT_YXMM128_256:
				if (vrex & PREFIX_EX_B) rm += EX_GPR_BASE;
				if (vrex & PREFIX_EX_L) {
					size = 256;
					rm += AVXREGS_BASE;
				} else {
					size = 128;
					rm += SSEREGS_BASE;
				}
				break;
			case OT_WXMM32_64:
			case OT_LXMM64_128:
				if (vrex & PREFIX_EX_B) rm += EX_GPR_BASE;
				size = 128;
				rm += SSEREGS_BASE;
				break;

			case OT_WRM32_64:
			case OT_REG32_64_M8:
			case OT_REG32_64_M16:
				if (vrex & PREFIX_EX_B) rm += EX_GPR_BASE;
				if (vrex & PREFIX_EX_W) {
					size = 64;
					rm += REGS64_BASE;
				} else {
					size = 32;
					rm += REGS32_BASE;
				}
				break;

			default: return FALSE;
		}
		op->size = size;
		op->index = (uint8_t)rm;
		return TRUE;
	}

	/* Memory indirection decoding ahead:) */

	ps->usedPrefixes |= INST_PRE_ADDR_SIZE;
	if (lockableInstruction && (ps->decodedPrefixes & INST_PRE_LOCK)) *lockableInstruction = TRUE;

	if (effAdrSz == Decode16Bits) {
		/* Decoding according to Table 2-1. (16 bits) */
		if ((mod == 0) && (rm == 6)) {
			/* 6 is a special case - only 16 bits displacement. */
			op->type = O_DISP;
			di->dispSize = 16;
			if (!read_stream_safe_sint(ci, (int64_t*)&di->disp, sizeof(int16_t))) return FALSE;
		} else {
			/*
			 * Create the O_MEM for 16 bits indirection that requires 2 registers, E.G: [BX+SI].
			 * or create O_SMEM for a single register indirection, E.G: [BP].
			 */
			static uint8_t MODS[] = {R_BX, R_BX, R_BP, R_BP, R_SI, R_DI, R_BP, R_BX};
			static uint8_t MODS2[] = {R_SI, R_DI, R_SI, R_DI};
			if (rm < 4) {
				op->type = O_MEM;
				di->base = MODS[rm];
				op->index = MODS2[rm];
			} else {
				op->type = O_SMEM;
				op->index = MODS[rm];
			}

			if (mod == 1) { /* 8 bits displacement + indirection */
				di->dispSize = 8;
				if (!read_stream_safe_sint(ci, (int64_t*)&di->disp, sizeof(int8_t))) return FALSE;
			} else if (mod == 2) { /* 16 bits displacement + indirection */
				di->dispSize = 16;
				if (!read_stream_safe_sint(ci, (int64_t*)&di->disp, sizeof(int16_t))) return FALSE;
			}
		}

		if ((rm == 2) || (rm == 3) || ((rm == 6) && (mod != 0))) {
			/* BP's default segment is SS, so ignore it. */
			prefixes_use_segment(INST_PRE_SS, ps, ci->dt, di);
		} else {
			/* Ignore default DS segment. */
			prefixes_use_segment(INST_PRE_DS, ps, ci->dt, di);
		}
	} else { /* Decode32Bits or Decode64Bits! */
		/* Remember that from a 32/64 bits ModR/M byte a SIB byte could follow! */
		if ((mod == 0) && (rm == 5)) {

			/* 5 is a special case - only 32 bits displacement, or RIP relative. */
			di->dispSize = 32;
			if (!read_stream_safe_sint(ci, (int64_t*)&di->disp, sizeof(int32_t))) return FALSE;

			if (ci->dt == Decode64Bits) {
				/* In 64 bits decoding mode, despite the address size, it is a RIP-relative address. */
				op->type = O_SMEM;
				op->index = R_RIP;
				di->flags |= FLAG_RIP_RELATIVE;
			} else {
				/* Absolute address: */
				op->type = O_DISP;
			}
		} else {
			if (rm == 4) {
				/* 4 is a special case - SIB byte + disp8/32 follows! */
				/* Read SIB byte. */
				if (!read_stream_safe_uint(ci, &sib, sizeof(int8_t))) return FALSE;
				operands_extract_sib(di, opNum, ps, effAdrSz, sib, mod);
			} else {
				op->type = O_SMEM;
				if (vrex & PREFIX_EX_B) {
					ps->usedPrefixes |= INST_PRE_REX;
					rm += EX_GPR_BASE;
				}

				if (effAdrSz == Decode64Bits) op->index = (uint8_t)(REGS64_BASE + rm);
				else op->index = (uint8_t)(REGS32_BASE + rm);
			}

			if (mod == 1) {
				di->dispSize = 8;
				if (!read_stream_safe_sint(ci, (int64_t*)&di->disp, sizeof(int8_t))) return FALSE;
			} else if ((mod == 2) || ((sib & 7) == 5)) { /* If there is no BASE, read DISP32! */
				di->dispSize = 32;
				if (!read_stream_safe_sint(ci, (int64_t*)&di->disp, sizeof(int32_t))) return FALSE;
			}
		}

		/* Get the base register. */
		base = op->index;
		if (di->base != R_NONE) base = di->base;
		else if (di->scale >= 2) base = 0; /* If it's only an index but got scale, it's still DS. */
		/* Default for EBP/ESP is SS segment. 64 bits mode ignores DS anyway. */
		if ((base == R_EBP) || (base == R_ESP)) prefixes_use_segment(INST_PRE_SS, ps, ci->dt, di);
		else prefixes_use_segment(INST_PRE_DS, ps, ci->dt, di);
	}

	return TRUE;
}


/*
 * This function is responsible to textually format a required operand according to its type.
 * It is vital to understand that there are other operands than what the ModR/M byte specifies.

 * Only by decoding the operands of an instruction which got a LOCK prefix, we could tell whether it may use the LOCK prefix.
+ * According to Intel, LOCK prefix must precede some specific instructions AND in their memory destination operand form (which means first operand). + * LOCK INC EAX, would generate an exception, but LOCK INC [EAX] is alright. + * Also LOCK ADD BX, [BP] would generate an exception. + + * Return code: + * TRUE - continue parsing the instruction and its operands, everything went right 'till now. + * FALSE - not enough bytes, or invalid operands. + */ + +int operands_extract(_CodeInfo* ci, _DInst* di, _InstInfo* ii, + _iflags instFlags, _OpType type, _OperandNumberType opNum, + unsigned int modrm, _PrefixState* ps, _DecodeType effOpSz, + _DecodeType effAdrSz, int* lockableInstruction) +{ + int ret = 0; + unsigned int mod = 0, reg = 0, rm = 0, vexV = ps->vexV; + unsigned int vrex = ps->vrex, typeHandled = TRUE; + _Operand* op = &di->ops[opNum]; + + /* Used to indicate the size of the MEMORY INDIRECTION only. */ + _OperandSizeType opSize = OPERAND_SIZE_NONE; + + /* + * ModRM bits: + * |7-6-5--------3-2-0| + * |MOD|REG/OPCODE|RM | + * |------------------| + */ + mod = (modrm >> 6) & 3; /* Mode(register-indirection, disp8+reg+indirection, disp16+reg+indirection, general-purpose register) */ + reg = (modrm >> 3) & 7; /* Register(could be part of the opcode itself or general-purpose register) */ + rm = modrm & 7; /* Specifies which general-purpose register or disp+reg to use. */ + + /* -- Memory Indirection Operands (that cannot be a general purpose register) -- */ + switch (type) + { + case OT_MEM64_128: /* Used only by CMPXCHG8/16B. */ + /* Make a specific check when the type is OT_MEM64_128 since the lockable CMPXCHG8B uses this one... */ + if (lockableInstruction && (ps->decodedPrefixes & INST_PRE_LOCK)) *lockableInstruction = TRUE; + if (effOpSz == Decode64Bits) { + ps->usedPrefixes |= INST_PRE_REX; + opSize = OPERAND_SIZE128; + } else opSize = OPERAND_SIZE64; + break; + case OT_MEM32: opSize = OPERAND_SIZE32; break; + case OT_MEM32_64: + /* Used by MOVNTI. 
Default size is 32bits, 64bits with REX. */ + if (effOpSz == Decode64Bits) { + ps->usedPrefixes |= INST_PRE_REX; + opSize = OPERAND_SIZE64; + } else opSize = OPERAND_SIZE32; + break; + case OT_MEM64: opSize = OPERAND_SIZE64; break; + case OT_MEM128: opSize = OPERAND_SIZE128; break; + case OT_MEM16_FULL: /* The size indicates about the second item of the pair. */ + switch (effOpSz) + { + case Decode16Bits: + ps->usedPrefixes |= INST_PRE_OP_SIZE; + opSize = OPERAND_SIZE16; + break; + case Decode32Bits: + ps->usedPrefixes |= INST_PRE_OP_SIZE; + opSize = OPERAND_SIZE32; + break; + case Decode64Bits: + /* Mark usage of REX only if it was required. */ + if ((instFlags & (INST_64BITS | INST_PRE_REX)) == (INST_64BITS | INST_PRE_REX)) ps->usedPrefixes |= INST_PRE_REX; + opSize = OPERAND_SIZE64; + break; + } + break; + case OT_MEM16_3264: /* The size indicates about the second item of the pair. */ + if (ci->dt == Decode64Bits) opSize = OPERAND_SIZE64; + else opSize = OPERAND_SIZE32; + break; + case OT_MEM_OPT: + /* Since the MEM is optional, only when mod != 3, then return true as if the operand was alright. */ + if (mod == 0x3) return TRUE; + break; + case OT_FPUM16: opSize = OPERAND_SIZE16; break; + case OT_FPUM32: opSize = OPERAND_SIZE32; break; + case OT_FPUM64: opSize = OPERAND_SIZE64; break; + case OT_FPUM80: opSize = OPERAND_SIZE80; break; + case OT_LMEM128_256: + if (vrex & PREFIX_EX_L) opSize = OPERAND_SIZE256; + else opSize = OPERAND_SIZE128; + break; + case OT_MEM: /* Size is unknown, but still handled. */ break; + default: typeHandled = FALSE; break; + } + if (typeHandled) { + /* All of the above types can't use a general-purpose register (a MOD of 3)!. 
*/ + if (mod == 0x3) { + if (lockableInstruction) *lockableInstruction = FALSE; + return FALSE; + } + op->size = _OPSIZETOINT[opSize]; + ret = operands_extract_modrm(ci, di, type, opNum, ps, effOpSz, effAdrSz, lockableInstruction, mod, rm, instFlags); + if ((op->type == O_REG) || (op->type == O_SMEM) || (op->type == O_MEM)) { + di->usedRegistersMask |= _REGISTERTORCLASS[op->index]; + } + return ret; + } + + /* -- Memory Indirection Operands (that can be a register) -- */ + typeHandled = TRUE; + switch (type) + { + case OT_RM_FULL: + ps->usedPrefixes |= INST_PRE_OP_SIZE; + /* PUSH/JMP/CALL are automatically promoted to 64 bits! */ + if (effOpSz == Decode32Bits) { + opSize = OPERAND_SIZE32; + break; + } else if (effOpSz == Decode64Bits) { + /* Mark usage of REX only if it was required. */ + if ((instFlags & INST_64BITS) == 0) ps->usedPrefixes |= INST_PRE_REX; + opSize = OPERAND_SIZE64; + break; + } + /* FALL THROUGH BECAUSE dt==Decoded16Bits @-<----*/ + case OT_RM16: + /* If we got here not from OT_RM16, then the prefix was used. */ + if (type != OT_RM16) ps->usedPrefixes |= INST_PRE_OP_SIZE; + opSize = OPERAND_SIZE16; + break; + case OT_RM32_64: + /* The default size is 32, which can be 64 with a REX only. */ + if (effOpSz == Decode64Bits) { + opSize = OPERAND_SIZE64; + /* Mark REX prefix as used if non-promoted instruction. */ + if ((instFlags & (INST_64BITS | INST_PRE_REX)) == (INST_64BITS | INST_PRE_REX)) { + ps->usedPrefixes |= INST_PRE_REX; + } + } else opSize = OPERAND_SIZE32; + break; + case OT_RM16_32: + /* Ignore REX, it's either 32 or 16 bits RM. */ + if (ps->decodedPrefixes & INST_PRE_OP_SIZE) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + /* Assume: We are in 64bits when we have this operand used. 
*/ + opSize = OPERAND_SIZE16; + } else opSize = OPERAND_SIZE32; + break; + case OT_WXMM32_64: + case OT_WRM32_64: + if (vrex & PREFIX_EX_W) opSize = OPERAND_SIZE64; + else opSize = OPERAND_SIZE32; + break; + case OT_YXMM64_256: + if (vrex & PREFIX_EX_L) opSize = OPERAND_SIZE256; + else opSize = OPERAND_SIZE64; + break; + case OT_YXMM128_256: + if (vrex & PREFIX_EX_L) opSize = OPERAND_SIZE256; + else opSize = OPERAND_SIZE128; + break; + case OT_LXMM64_128: + if (vrex & PREFIX_EX_L) opSize = OPERAND_SIZE128; + else opSize = OPERAND_SIZE64; + break; + case OT_RFULL_M16: + ps->usedPrefixes |= INST_PRE_OP_SIZE; + opSize = OPERAND_SIZE16; + break; + + case OT_RM8: + case OT_R32_M8: + case OT_R32_64_M8: + case OT_REG32_64_M8: + opSize = OPERAND_SIZE8; + break; + + case OT_XMM16: + case OT_R32_M16: + case OT_R32_64_M16: + case OT_REG32_64_M16: + opSize = OPERAND_SIZE16; + break; + + case OT_RM32: + case OT_MM32: + case OT_XMM32: + opSize = OPERAND_SIZE32; + break; + + case OT_MM64: + case OT_XMM64: + opSize = OPERAND_SIZE64; + break; + + case OT_XMM128: opSize = OPERAND_SIZE128; break; + case OT_YMM256: opSize = OPERAND_SIZE256; break; + default: typeHandled = FALSE; break; + } + if (typeHandled) { + /* Fill size of memory dereference for operand. */ + op->size = _OPSIZETOINT[opSize]; + ret = operands_extract_modrm(ci, di, type, opNum, ps, effOpSz, effAdrSz, lockableInstruction, mod, rm, instFlags); + if ((op->type == O_REG) || (op->type == O_SMEM) || (op->type == O_MEM)) { + di->usedRegistersMask |= _REGISTERTORCLASS[op->index]; + } + return ret; + } + + /* Simple operand type (no ModRM byte). */ + switch (type) + { + case OT_IMM8: + operands_set_ts(op, O_IMM, 8); + if (!read_stream_safe_uint(ci, &di->imm.byte, sizeof(int8_t))) return FALSE; + break; + case OT_IMM_FULL: /* 16, 32 or 64, depends on prefixes. */ + if (effOpSz == Decode16Bits) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + /* FALL THROUGH */ + case OT_IMM16: /* Force 16 bits imm. 
*/ + operands_set_ts(op, O_IMM, 16); + if (!read_stream_safe_uint(ci, &di->imm.word, sizeof(int16_t))) return FALSE; + break; + /* + * Extension: MOV imm64, requires REX. + * Make sure it needs the REX. + * REX must be present because op size function takes it into consideration. + */ + } else if ((effOpSz == Decode64Bits) && + ((instFlags & (INST_64BITS | INST_PRE_REX)) == (INST_64BITS | INST_PRE_REX))) { + ps->usedPrefixes |= INST_PRE_REX; + + operands_set_ts(op, O_IMM, 64); + if (!read_stream_safe_uint(ci, &di->imm.qword, sizeof(int64_t))) return FALSE; + break; + } else ps->usedPrefixes |= INST_PRE_OP_SIZE; + /* FALL THROUGH BECAUSE dt==Decoded32Bits @-<----*/ + case OT_IMM32: + op->type = O_IMM; + if (ci->dt == Decode64Bits) { + /* + * Imm32 is sign extended to 64 bits! + * Originally the op size was 64, but later was changed to reflect real size of imm. + */ + op->size = 32; + /* Use this as an indicator that it should be signed extended. */ + di->flags |= FLAG_IMM_SIGNED; + if (!read_stream_safe_sint(ci, &di->imm.sqword, sizeof(int32_t))) return FALSE; + } else { + op->size = 32; + if (!read_stream_safe_uint(ci, &di->imm.dword, sizeof(int32_t))) return FALSE; + } + break; + case OT_SEIMM8: /* Sign extended immediate. */ + /* + * PUSH SEIMM8 can be prefixed by operand size: + * Input stream: 66, 6a, 55 + * 64bits DT: push small 55 + * 32bits DT: push small 55 + * 16bits DT: push large 55 + * small/large indicates the size of the eSP pointer advancement. + * Check the instFlags (ii->flags) if it can be operand-size-prefixed and if the prefix exists. 
+ */ + op->type = O_IMM; + if ((instFlags & INST_PRE_OP_SIZE) && (ps->decodedPrefixes & INST_PRE_OP_SIZE)) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + switch (ci->dt) + { + case Decode16Bits: op->size = 32; break; + case Decode32Bits: + case Decode64Bits: + op->size = 16; + break; + } + } else op->size = 8; + di->flags |= FLAG_IMM_SIGNED; + if (!read_stream_safe_sint(ci, &di->imm.sqword, sizeof(int8_t))) return FALSE; + break; + case OT_IMM16_1: + operands_set_ts(op, O_IMM1, 16); + if (!read_stream_safe_uint(ci, &di->imm.ex.i1, sizeof(int16_t))) return FALSE; + break; + case OT_IMM8_1: + operands_set_ts(op, O_IMM1, 8); + if (!read_stream_safe_uint(ci, &di->imm.ex.i1, sizeof(int8_t))) return FALSE; + break; + case OT_IMM8_2: + operands_set_ts(op, O_IMM2, 8); + if (!read_stream_safe_uint(ci, &di->imm.ex.i2, sizeof(int8_t))) return FALSE; + break; + case OT_REG8: + operands_set_ts(op, O_REG, 8); + if (ps->prefixExtType) { + /* + * If REX prefix is valid then we will have to use low bytes. + * This is a PASSIVE behavior changer of REX prefix, it affects operands even if its value is 0x40 ! + */ + ps->usedPrefixes |= INST_PRE_REX; + op->index = (uint8_t)operands_fix_8bit_rex_base(reg + ((vrex & PREFIX_EX_R) ? EX_GPR_BASE : 0)); + } else op->index = (uint8_t)(REGS8_BASE + reg); + break; + case OT_REG16: + operands_set_tsi(op, O_REG, 16, REGS16_BASE + reg); + break; + case OT_REG_FULL: + switch (effOpSz) + { + case Decode16Bits: + ps->usedPrefixes |= INST_PRE_OP_SIZE; + if (vrex & PREFIX_EX_R) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } + operands_set_tsi(op, O_REG, 16, REGS16_BASE + reg); + break; + case Decode32Bits: + if (vrex & PREFIX_EX_R) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } else ps->usedPrefixes |= INST_PRE_OP_SIZE; + operands_set_tsi(op, O_REG, 32, REGS32_BASE + reg); + break; + case Decode64Bits: /* rex must be presented. 
*/ + ps->usedPrefixes |= INST_PRE_REX; + operands_set_tsi(op, O_REG, 64, REGS64_BASE + reg + ((vrex & PREFIX_EX_R) ? EX_GPR_BASE : 0)); + break; + } + break; + case OT_REG32: + if (vrex & PREFIX_EX_R) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } + operands_set_tsi(op, O_REG, 32, REGS32_BASE + reg); + break; + case OT_REG32_64: /* Handle CVT's, MOVxX and MOVNTI instructions which could be extended to 64 bits registers with REX. */ + if (vrex & PREFIX_EX_R) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } + + /* Is it a promoted instruction? (only INST_64BITS is set and REX isn't required.) */ + if ((ci->dt == Decode64Bits) && ((instFlags & (INST_64BITS | INST_PRE_REX)) == INST_64BITS)) { + operands_set_tsi(op, O_REG, 64, REGS64_BASE + reg); + break; + } + /* Give a chance to REX.W. Because if it was a promoted instruction we don't care about REX.W anyways. */ + if (vrex & PREFIX_EX_W) { + ps->usedPrefixes |= INST_PRE_REX; + operands_set_tsi(op, O_REG, 64, REGS64_BASE + reg); + } else operands_set_tsi(op, O_REG, 32, REGS32_BASE + reg); + break; + case OT_FREG32_64_RM: /* Force decoding mode. Used for MOV CR(n)/DR(n) which defaults to 64 bits operand size in 64 bits. */ + if (vrex & PREFIX_EX_B) { + ps->usedPrefixes |= INST_PRE_REX; + rm += EX_GPR_BASE; + } + + if (ci->dt == Decode64Bits) operands_set_tsi(op, O_REG, 64, REGS64_BASE + rm); + else operands_set_tsi(op, O_REG, 32, REGS32_BASE + rm); + break; + case OT_MM: /* MMX register */ + operands_set_tsi(op, O_REG, 64, MMXREGS_BASE + reg); + break; + case OT_MM_RM: /* MMX register, this time from the RM field */ + operands_set_tsi(op, O_REG, 64, MMXREGS_BASE + rm); + break; + case OT_REGXMM0: /* Implicit XMM0 operand. 
*/ + reg = 0; + vrex = 0; + /* FALL THROUGH */ + case OT_XMM: /* SSE register */ + if (vrex & PREFIX_EX_R) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } + operands_set_tsi(op, O_REG, 128, SSEREGS_BASE + reg); + break; + case OT_XMM_RM: /* SSE register, this time from the RM field */ + if (vrex & PREFIX_EX_B) { + ps->usedPrefixes |= INST_PRE_REX; + rm += EX_GPR_BASE; + } + operands_set_tsi(op, O_REG, 128, SSEREGS_BASE + rm); + break; + case OT_CREG: + /* + * Don't parse if the reg exceeds the bounds of the array. + * Most of the CR's are not implemented, so if there's no matching string, the operand is invalid. + */ + if (vrex & PREFIX_EX_R) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } else if ((ci->dt == Decode32Bits) && (ps->decodedPrefixes & INST_PRE_LOCK)) { + /* + * NOTE: In 32 bits decoding mode, + * if the lock prefix is set before MOV CR(n) it will become the 4th bit of the REG field like REX.R in 64 bits. + */ + reg += EX_GPR_BASE; + ps->usedPrefixes |= INST_PRE_LOCK; + } + /* Ignore some registers which do not exist. */ + if ((reg >= CREGS_MAX) || (reg == 1) || ((reg >= 5) && (reg <= 7))) return FALSE; + + op->type = O_REG; + if (ci->dt == Decode64Bits) op->size = 64; + else op->size = 32; + op->index = (uint8_t)(CREGS_BASE + reg); + break; + case OT_DREG: + /* + * In 64 bits there are 16 debug registers. + * but accessing any of dr8-15 which aren't implemented will cause an #ud. + */ + if ((reg == 4) || (reg == 5) || (vrex & PREFIX_EX_R)) return FALSE; + + op->type = O_REG; + if (ci->dt == Decode64Bits) op->size = 64; + else op->size = 32; + op->index = (uint8_t)(DREGS_BASE + reg); + break; + case OT_SREG: /* Works with REG16 only! */ + /* If lockableInstruction pointer is non-null we know it's the first operand. */ + if (lockableInstruction && (reg == 1)) return FALSE; /* Can't MOV CS, . */ + /*Don't parse if the reg exceeds the bounds of the array. 
*/ + if (reg <= SEG_REGS_MAX - 1) operands_set_tsi(op, O_REG, 16, SREGS_BASE + reg); + else return FALSE; + break; + case OT_SEG: + op->type = O_REG; + /* Size of reg is always 16, it's up to caller to zero extend it to operand size. */ + op->size = 16; + ps->usedPrefixes |= INST_PRE_OP_SIZE; + /* + * Extract the SEG from ii->flags this time!!! + * Check whether an operand size prefix is used. + */ + switch (instFlags & INST_PRE_SEGOVRD_MASK) + { + case INST_PRE_ES: op->index = R_ES; break; + case INST_PRE_CS: op->index = R_CS; break; + case INST_PRE_SS: op->index = R_SS; break; + case INST_PRE_DS: op->index = R_DS; break; + case INST_PRE_FS: op->index = R_FS; break; + case INST_PRE_GS: op->index = R_GS; break; + } + break; + case OT_ACC8: + operands_set_tsi(op, O_REG, 8, R_AL); + break; + case OT_ACC16: + operands_set_tsi(op, O_REG, 16, R_AX); + break; + case OT_ACC_FULL_NOT64: /* No REX.W support for IN/OUT. */ + vrex &= ~PREFIX_EX_W; + case OT_ACC_FULL: + if (effOpSz == Decode16Bits) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + operands_set_tsi(op, O_REG, 16, R_AX); + } else if (effOpSz == Decode32Bits) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + operands_set_tsi(op, O_REG, 32, R_EAX); + } else { /* Decode64Bits */ + /* Only non-promoted instructions need REX in order to decode in 64 bits. */ + /* MEM-OFFSET MOV's are NOT automatically promoted to 64 bits. */ + if (~instFlags & INST_64BITS) { + ps->usedPrefixes |= INST_PRE_REX; + } + operands_set_tsi(op, O_REG, 64, R_RAX); + } + break; + case OT_PTR16_FULL: + /* ptr16:full - full is size of operand size to read, therefore Operand Size Prefix affects this. So we need to handle it. */ + if (effOpSz == Decode16Bits) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + ci->codeLen -= sizeof(int16_t)*2; + if (ci->codeLen < 0) return FALSE; + + operands_set_ts(op, O_PTR, 16); + di->imm.ptr.off = RUSHORT(ci->code); /* Read offset first. */ + di->imm.ptr.seg = RUSHORT((ci->code + sizeof(int16_t))); /* And read segment. 
*/ + + ci->code += sizeof(int16_t)*2; + } else { /* Decode32Bits, for Decode64Bits this instruction is invalid. */ + ps->usedPrefixes |= INST_PRE_OP_SIZE; + ci->codeLen -= sizeof(int32_t) + sizeof(int16_t); + if (ci->codeLen < 0) return FALSE; + + operands_set_ts(op, O_PTR, 32); + di->imm.ptr.off = RULONG(ci->code); /* Read 32bits offset this time. */ + di->imm.ptr.seg = RUSHORT((ci->code + sizeof(int32_t))); /* And read segment, 16 bits. */ + + ci->code += sizeof(int32_t) + sizeof(int16_t); + } + break; + case OT_RELCB: + case OT_RELC_FULL: + + if (type == OT_RELCB) { + operands_set_ts(op, O_PC, 8); + if (!read_stream_safe_sint(ci, &di->imm.sqword, sizeof(int8_t))) return FALSE; + } else { /* OT_RELC_FULL */ + + /* Yep, operand size prefix affects relc also. */ + ps->usedPrefixes |= INST_PRE_OP_SIZE; + if (effOpSz == Decode16Bits) { + operands_set_ts(op, O_PC, 16); + if (!read_stream_safe_sint(ci, &di->imm.sqword, sizeof(int16_t))) return FALSE; + } else { /* Decode32Bits or Decode64Bits = for now they are the same */ + operands_set_ts(op, O_PC, 32); + if (!read_stream_safe_sint(ci, &di->imm.sqword, sizeof(int32_t))) return FALSE; + } + } + + /* Support for hint, see if there's a segment override. */ + if ((ii->opcodeId >= I_JO) && (ii->opcodeId <= I_JG)) { + if (ps->decodedPrefixes & INST_PRE_CS) { + ps->usedPrefixes |= INST_PRE_CS; + di->flags |= FLAG_HINT_NOT_TAKEN; + } else if (ps->decodedPrefixes & INST_PRE_DS) { + ps->usedPrefixes |= INST_PRE_DS; + di->flags |= FLAG_HINT_TAKEN; + } + } + break; + case OT_MOFFS8: + op->size = 8; + /* FALL THROUGH, size won't be changed. */ + case OT_MOFFS_FULL: + op->type = O_DISP; + if (op->size == 0) { + /* Calculate size of operand (same as ACC size). */ + switch (effOpSz) + { + case Decode16Bits: op->size = 16; break; + case Decode32Bits: op->size = 32; break; + case Decode64Bits: op->size = 64; break; + } + } + + prefixes_use_segment(INST_PRE_DS, ps, ci->dt, di); + + /* + * Just a pointer to a BYTE, WORD, DWORD, QWORD. 
Works only with ACC8/16/32/64 respectively. + * MOV [0x1234], AL ; MOV AX, [0x1234] ; MOV EAX, [0x1234], note that R/E/AX will be chosen by OT_ACC_FULL. + */ + if (effAdrSz == Decode16Bits) { + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + + di->dispSize = 16; + if (!read_stream_safe_uint(ci, &di->disp, sizeof(int16_t))) return FALSE; + } else if (effAdrSz == Decode32Bits) { + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + + di->dispSize = 32; + if (!read_stream_safe_uint(ci, &di->disp, sizeof(int32_t))) return FALSE; + } else { /* Decode64Bits */ + di->dispSize = 64; + if (!read_stream_safe_uint(ci, &di->disp, sizeof(int64_t))) return FALSE; + } + break; + case OT_CONST1: + operands_set_ts(op, O_IMM, 8); + di->imm.byte = 1; + break; + case OT_REGCL: + operands_set_tsi(op, O_REG, 8, R_CL); + break; + + case OT_FPU_SI: + /* Low 3 bits specify the REG, similar to the MODR/M byte reg. */ + operands_set_tsi(op, O_REG, 32, FPUREGS_BASE + (*(ci->code-1) & 7)); + break; + case OT_FPU_SSI: + operands_set_tsi(op, O_REG, 32, R_ST0); + operands_set_tsi(op + 1, O_REG, 32, FPUREGS_BASE + (*(ci->code-1) & 7)); + break; + case OT_FPU_SIS: + operands_set_tsi(op, O_REG, 32, FPUREGS_BASE + (*(ci->code-1) & 7)); + operands_set_tsi(op + 1, O_REG, 32, R_ST0); + break; + + /* + * Special treatment for Instructions-Block: + * INC/DEC (only 16/32 bits) /PUSH/POP/XCHG instructions, which get their REG from their own binary code. + + * Notice these instructions are 1 or 2 byte long, + * code points after the byte which represents the instruction itself, + * thus, even if the instructions are 2 bytes long it will read its last byte which contains the REG info. + */ + case OT_IB_RB: + /* Low 3 bits specify the REG, similar to the MODR/M byte reg. 
*/ + operands_set_ts(op, O_REG, 8); + reg = *(ci->code-1) & 7; + if (vrex & PREFIX_EX_B) { + ps->usedPrefixes |= INST_PRE_REX; + op->index = (uint8_t)operands_fix_8bit_rex_base(reg + EX_GPR_BASE); + } else if (ps->prefixExtType == PET_REX) { + ps->usedPrefixes |= INST_PRE_REX; + op->index = (uint8_t)operands_fix_8bit_rex_base(reg); + } else op->index = (uint8_t)(REGS8_BASE + reg); + break; + case OT_IB_R_FULL: + reg = *(ci->code-1) & 7; + switch (effOpSz) + { + case Decode16Bits: + ps->usedPrefixes |= INST_PRE_OP_SIZE; + if (vrex & PREFIX_EX_B) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } + operands_set_tsi(op, O_REG, 16, REGS16_BASE + reg); + break; + case Decode32Bits: + if (vrex & PREFIX_EX_B) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } else ps->usedPrefixes |= INST_PRE_OP_SIZE; + operands_set_tsi(op, O_REG, 32, REGS32_BASE + reg); + break; + case Decode64Bits: + /* + * Automatically promoted instruction can drop REX prefix if not required. + * PUSH/POP defaults to 64 bits. --> INST_64BITS + * MOV imm64 / BSWAP requires REX.W to be 64 bits --> INST_64BITS | INST_PRE_REX + */ + if ((instFlags & INST_64BITS) && ((instFlags & INST_PRE_REX) == 0)) { + if (vrex & PREFIX_EX_B) { + ps->usedPrefixes |= INST_PRE_REX; + reg += EX_GPR_BASE; + } + } else { + ps->usedPrefixes |= INST_PRE_REX; + reg += (vrex & PREFIX_EX_B) ? EX_GPR_BASE : 0; + } + operands_set_tsi(op, O_REG, 64, REGS64_BASE + reg); + break; + } + break; + + /* + * Special treatment for repeatable instructions. + + * We want the following output: + * If there's only the REP/NZ prefix, we won't output anything (All operands are implicit). + * If there's an operand size prefix, we will change the suffix letter of the mnemonic, which specifies the size of operand to the required one. + * If there's a segment override prefix, we will output the segment and the used index register (EDI/ESI). 
+ * If there's an address size prefix, we will output the (segment if needed and) the used and inverted index register (DI/SI). + + * Example: + * :: Decoding in 16 bits mode! :: + * AD ~ LODSW + * 66 AD ~ LODSD + * F3 AC ~ REP LODSB + * F3 66 AD ~ REP LODSD + * F3 3E AC ~ REP LODS BYTE DS:[SI] + * F3 67 AD ~ REP LODS WORD [ESI] + + * The basic form of a repeatable instruction has its operands hidden and has a suffix letter + * which implies on the size of operation being done. + * Therefore, we cannot change the mnemonic here when we encounter another prefix and its not the decoder's responsibility to do so. + * That's why the caller is responsible to add the suffix letter if no other prefixes are used. + * And all we are doing here is formatting the operand correctly. + */ + case OT_REGI_ESI: + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + + op->type = O_SMEM; + + /* This might be a 16, 32 or 64 bits instruction, depends on the decoding mode. */ + if (instFlags & INST_16BITS) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + + if (effOpSz == Decode16Bits) op->size = 16; + else if ((effOpSz == Decode64Bits) && (instFlags & INST_64BITS)) { + ps->usedPrefixes |= INST_PRE_REX; + op->size = 64; + } else op->size = 32; + } else op->size = 8; + + /* + * Clear segment in case OT_REGI_EDI was parsed earlier, + * DS can be overridden and therefore has precedence. + */ + di->segment = 0; + prefixes_use_segment(INST_PRE_DS, ps, ci->dt, di); + + if (effAdrSz == Decode16Bits) op->index = R_SI; + else if (effAdrSz == Decode32Bits) op->index = R_ESI; + else op->index = R_RSI; + break; + case OT_REGI_EDI: + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + + op->type = O_SMEM; + + /* This might be a 16 or 32 bits instruction, depends on the decoding mode. 
*/ + if (instFlags & INST_16BITS) { + ps->usedPrefixes |= INST_PRE_OP_SIZE; + + if (effOpSz == Decode16Bits) op->size = 16; + else if ((effOpSz == Decode64Bits) && (instFlags & INST_64BITS)) { + ps->usedPrefixes |= INST_PRE_REX; + op->size = 64; + } else op->size = 32; + } else op->size = 8; + + /* Note: The [rDI] operand can't be prefixed by a segment override, therefore we don't set usedPrefixes. */ + if ((opNum == ONT_1) && (ci->dt != Decode64Bits)) di->segment = R_ES | SEGMENT_DEFAULT; /* No ES in 64 bits mode. */ + + if (effAdrSz == Decode16Bits) op->index = R_DI; + else if (effAdrSz == Decode32Bits) op->index = R_EDI; + else op->index = R_RDI; + break; + + /* Used for In/Out instructions varying forms. */ + case OT_REGDX: + /* Simple single IN/OUT instruction. */ + operands_set_tsi(op, O_REG, 16, R_DX); + break; + + /* Used for INVLPGA instruction. */ + case OT_REGECX: + operands_set_tsi(op, O_REG, 32, R_ECX); + break; + case OT_REGI_EBXAL: + /* XLAT BYTE [rBX + AL] */ + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + + prefixes_use_segment(INST_PRE_DS, ps, ci->dt, di); + + /* Size of deref is always 8 for xlat. */ + operands_set_tsi(op, O_MEM, 8, R_AL); + + if (effAdrSz == Decode16Bits) di->base = R_BX; + else if (effAdrSz == Decode32Bits) di->base = R_EBX; + else { + ps->usedPrefixes |= INST_PRE_REX; + di->base = R_RBX; + } + break; + case OT_REGI_EAX: + /* + * Implicit rAX as memory indirection operand. Used by AMD's SVM instructions. + * Since this is a memory indirection, the default address size in 64bits decoding mode is 64. 
+ */ + + if (effAdrSz == Decode64Bits) operands_set_tsi(op, O_SMEM, 64, R_RAX); + else if (effAdrSz == Decode32Bits) { + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + operands_set_tsi(op, O_SMEM, 32, R_EAX); + } else { + ps->usedPrefixes |= INST_PRE_ADDR_SIZE; + operands_set_tsi(op, O_SMEM, 16, R_AX); + } + break; + case OT_VXMM: + operands_set_tsi(op, O_REG, 128, SSEREGS_BASE + vexV); + break; + case OT_XMM_IMM: + ci->codeLen -= sizeof(int8_t); + if (ci->codeLen < 0) return FALSE; + + if (ci->dt == Decode32Bits) reg = (*ci->code >> 4) & 0x7; + else reg = (*ci->code >> 4) & 0xf; + operands_set_tsi(op, O_REG, 128, SSEREGS_BASE + reg); + + ci->code += sizeof(int8_t); + break; + case OT_YXMM: + if (vrex & PREFIX_EX_R) reg += EX_GPR_BASE; + if (ps->vrex & PREFIX_EX_L) operands_set_tsi(op, O_REG, 256, AVXREGS_BASE + reg); + else operands_set_tsi(op, O_REG, 128, SSEREGS_BASE + reg); + break; + case OT_YXMM_IMM: + ci->codeLen -= sizeof(int8_t); + if (ci->codeLen < 0) return FALSE; + + if (ci->dt == Decode32Bits) reg = (*ci->code >> 4) & 0x7; + else reg = (*ci->code >> 4) & 0xf; + + if (ps->vrex & PREFIX_EX_L) operands_set_tsi(op, O_REG, 256, AVXREGS_BASE + reg); + else operands_set_tsi(op, O_REG, 128, SSEREGS_BASE + reg); + + ci->code += sizeof(int8_t); + break; + case OT_YMM: + if (vrex & PREFIX_EX_R) reg += EX_GPR_BASE; + operands_set_tsi(op, O_REG, 256, AVXREGS_BASE + reg); + break; + case OT_VYMM: + operands_set_tsi(op, O_REG, 256, AVXREGS_BASE + vexV); + break; + case OT_VYXMM: + if (ps->vrex & PREFIX_EX_L) operands_set_tsi(op, O_REG, 256, AVXREGS_BASE + vexV); + else operands_set_tsi(op, O_REG, 128, SSEREGS_BASE + vexV); + break; + case OT_WREG32_64: + if (vrex & PREFIX_EX_R) reg += EX_GPR_BASE; + if (ps->vrex & PREFIX_EX_W) operands_set_tsi(op, O_REG, 64, REGS64_BASE + reg); + else operands_set_tsi(op, O_REG, 32, REGS32_BASE + reg); + break; + default: return FALSE; + } + + if ((op->type == O_REG) || (op->type == O_SMEM) || (op->type == O_MEM)) { + 
			/* Record which register class this operand touches (O_REG/O_SMEM/O_MEM only). */
			di->usedRegistersMask |= _REGISTERTORCLASS[op->index];
	}

	return TRUE;
}
diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/operands.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/operands.h
new file mode 100644
index 00000000..2d916474
--- /dev/null
+++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/operands.h
@@ -0,0 +1,28 @@
/*
operands.h

diStorm3 - Powerful disassembler for X86/AMD64
http://ragestorm.net/distorm/
distorm at gmail dot com
Copyright (C) 2003-2018 Gil Dabah
This library is licensed under the BSD license. See the file COPYING.
*/


#ifndef OPERANDS_H
#define OPERANDS_H

#include "config.h"
#include "decoder.h"
#include "prefix.h"
#include "instructions.h"


/* Lookup table: maps an operand register index to its register-class mask bit (see usedRegistersMask). */
extern uint32_t _REGISTERTORCLASS[];

/*
 * Decode a single operand of type 'type' for the current instruction into 'op'/'di'.
 * Consumes bytes from ci as needed (immediates, displacements) and marks consumed
 * prefixes in ps. Returns TRUE on success, FALSE if the operand is invalid.
 * lockableInstruction is non-NULL only for the first operand (LOCK legality check).
 */
int operands_extract(_CodeInfo* ci, _DInst* di, _InstInfo* ii,
	_iflags instFlags, _OpType type, _OperandNumberType opNum,
	unsigned int modrm, _PrefixState* ps, _DecodeType effOpSz,
	_DecodeType effAdrSz, int* lockableInstruction);

#endif /* OPERANDS_H */
diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.c
new file mode 100644
index 00000000..f6703d74
--- /dev/null
+++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.c
@@ -0,0 +1,368 @@
/*
prefix.c

diStorm3 - Powerful disassembler for X86/AMD64
http://ragestorm.net/distorm/
distorm at gmail dot com
Copyright (C) 2003-2018 Gil Dabah
This library is licensed under the BSD license. See the file COPYING.
*/


#include "prefix.h"

#include "x86defs.h"
#include "instructions.h"
#include "mnemonics.h"


/*
 * The main purpose of this module is to keep track of all kind of prefixes a single instruction may have.
 * The problem is that a single instruction may have up to six different prefix-types.
 * That's why I have to detect such cases and drop those excess prefixes.
+ */ + +int prefixes_is_valid(unsigned int ch, _DecodeType dt) +{ + switch (ch) { + /* for i in xrange(0x40, 0x50): print "case 0x%2x:" % i */ + case 0x40: /* REX: */ + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4a: + case 0x4b: + case 0x4c: + case 0x4d: + case 0x4e: + case 0x4f: return (dt == Decode64Bits); + case PREFIX_LOCK: return TRUE; + case PREFIX_REPNZ: return TRUE; + case PREFIX_REP: return TRUE; + case PREFIX_CS: return TRUE; + case PREFIX_SS: return TRUE; + case PREFIX_DS: return TRUE; + case PREFIX_ES: return TRUE; + case PREFIX_FS: return TRUE; + case PREFIX_GS: return TRUE; + case PREFIX_OP_SIZE: return TRUE; + case PREFIX_ADDR_SIZE: return TRUE; + /* The VEXs might be false positives, the decode_perfixes will determine for sure. */ + case PREFIX_VEX2b: /* VEX is supported for all modes, because 16 bits Pmode is included. */ + case PREFIX_VEX3b: return TRUE; + } + return FALSE; +} + +/* Ignore a specific prefix type. */ +void prefixes_ignore(_PrefixState* ps, _PrefixIndexer pi) +{ + /* + * If that type of prefix appeared already, set the bit of that *former* prefix. + * Anyway, set the new index of that prefix type to the current index, so next time we know its position. + */ + if (ps->pfxIndexer[pi] != PFXIDX_NONE) ps->unusedPrefixesMask |= (1 << ps->pfxIndexer[pi]); +} + +/* Ignore all prefix. */ +void prefixes_ignore_all(_PrefixState* ps) +{ + int i; + for (i = 0; i < PFXIDX_MAX; i++) + prefixes_ignore(ps, i); +} + +/* Calculates which prefixes weren't used and accordingly sets the bits in the unusedPrefixesMask. */ +uint16_t prefixes_set_unused_mask(_PrefixState* ps) +{ + /* + * The decodedPrefixes represents the prefixes that were *read* from the binary stream for the instruction. + * The usedPrefixes represents the prefixes that were actually used by the instruction in the *decode* phase. 
+ * Xoring between the two will result in a 'diff' which returns the prefixes that were read + * from the stream *and* that were never used in the actual decoding. + * + * Only one prefix per type can be set in decodedPrefixes from the stream. + * Therefore it's enough to check each type once and set the flag accordingly. + * That's why we had to book-keep each prefix type and its position. + * So now we know which bits we need to set exactly in the mask. + */ + _iflags unusedPrefixesDiff = ps->decodedPrefixes ^ ps->usedPrefixes; + + /* Examine unused prefixes by type: */ + /* + * About REX: it might be set in the diff although it was never in the stream itself. + * This is because the vrex is shared between VEX and REX and some places flag it as REX usage, while + * we were really decoding an AVX instruction. + * It's not a big problem, because the prefixes_ignore func will ignore it anyway, + * since it wasn't seen earlier. But it's important to know this. + */ + if (unusedPrefixesDiff & INST_PRE_REX) prefixes_ignore(ps, PFXIDX_REX); + if (unusedPrefixesDiff & INST_PRE_SEGOVRD_MASK) prefixes_ignore(ps, PFXIDX_SEG); + if (unusedPrefixesDiff & INST_PRE_LOKREP_MASK) prefixes_ignore(ps, PFXIDX_LOREP); + if (unusedPrefixesDiff & INST_PRE_OP_SIZE) prefixes_ignore(ps, PFXIDX_OP_SIZE); + if (unusedPrefixesDiff & INST_PRE_ADDR_SIZE) prefixes_ignore(ps, PFXIDX_ADRS); + /* If a VEX instruction was found, its prefix is considered as used, therefore no point for checking for it. */ + + return ps->unusedPrefixesMask; +} + +/* + * Mark a prefix as unused, and bookkeep where we last saw this same type, + * because in the future we might want to disable it too. + */ +_INLINE_ void prefixes_track_unused(_PrefixState* ps, int index, _PrefixIndexer pi) +{ + prefixes_ignore(ps, pi); + /* Book-keep the current index for this type. */ + ps->pfxIndexer[pi] = index; +} + +/* + * Read as many prefixes as possible, up to 15 bytes, and halt when we encounter non-prefix byte. 
 * This algorithm tries to imitate a real processor, where the same prefix can appear a few times, etc.
 * The tiny complexity is that we want to know when a prefix was superfluous and mark any copy of it as unused.
 * Note that the last prefix of its type will be considered as used, and all the others (of same type) before it as unused.
 */
void prefixes_decode(const uint8_t* code, int codeLen, _PrefixState* ps, _DecodeType dt)
{
	int index, done;
	uint8_t vex;

	/*
	 * First thing to do, scan for prefixes, there are six types of prefixes.
	 * There may be up to six prefixes before a single instruction, not the same type, no special order,
	 * except REX/VEX must precede immediately the first opcode byte.
	 * BTW - This is the reason why I didn't make the REP prefixes part of the instructions (STOS/SCAS/etc).
	 *
	 * Another thing, the instruction maximum size is 15 bytes, thus if we read more than 15 bytes, we will halt.
	 *
	 * We attach all prefixes to the next instruction, there might be two or more occurrences from the same prefix.
	 * Also, since VEX can be allowed only once we will test it separately.
	 */
	for (index = 0, done = FALSE;
		 (codeLen > 0) && (code - ps->start < INST_MAXIMUM_SIZE);
		 code++, codeLen--, index++) {
		/*
		 NOTE: AMD treat lock/rep as two different groups... But I am based on Intel.

		 - Lock and Repeat:
			- 0xF0 - LOCK
			- 0xF2 - REPNE/REPNZ
			- 0xF3 - REP/REPE/REPZ
		 - Segment Override:
			- 0x2E - CS
			- 0x36 - SS
			- 0x3E - DS
			- 0x26 - ES
			- 0x64 - FS
			- 0x65 - GS
		 - Operand-Size Override: 0x66, switching default size.
		 - Address-Size Override: 0x67, switching default size.

		 64 Bits:
			- REX: 0x40 - 0x4f, extends register access.
			- 2 Bytes VEX: 0xc4
			- 3 Bytes VEX: 0xc5
		 32 Bits:
			- 2 Bytes VEX: 0xc4 11xx-xxxx
			- 3 Bytes VEX: 0xc5 11xx-xxxx
		*/

		/* Examine what type of prefix we got. */
		switch (*code)
		{
			/* REX type, 64 bits decoding mode only: */
			case 0x40:
			case 0x41:
			case 0x42:
			case 0x43:
			case 0x44:
			case 0x45:
			case 0x46:
			case 0x47:
			case 0x48:
			case 0x49:
			case 0x4a:
			case 0x4b:
			case 0x4c:
			case 0x4d:
			case 0x4e:
			case 0x4f:
				if (dt == Decode64Bits) {
					ps->decodedPrefixes |= INST_PRE_REX;
					ps->vrex = *code & 0xf; /* Keep only BXRW. */
					ps->rexPos = code;
					ps->prefixExtType = PET_REX;
					prefixes_track_unused(ps, index, PFXIDX_REX);
				} else done = TRUE; /* If we are not in 64 bits mode, it's an instruction, then halt. */
				break;

			/* LOCK and REPx type: */
			case PREFIX_LOCK:
				ps->decodedPrefixes |= INST_PRE_LOCK;
				prefixes_track_unused(ps, index, PFXIDX_LOREP);
				break;
			case PREFIX_REPNZ:
				ps->decodedPrefixes |= INST_PRE_REPNZ;
				prefixes_track_unused(ps, index, PFXIDX_LOREP);
				break;
			case PREFIX_REP:
				ps->decodedPrefixes |= INST_PRE_REP;
				prefixes_track_unused(ps, index, PFXIDX_LOREP);
				break;

			/* Seg Override type: */
			case PREFIX_CS:
				ps->decodedPrefixes |= INST_PRE_CS;
				prefixes_track_unused(ps, index, PFXIDX_SEG);
				break;
			case PREFIX_SS:
				ps->decodedPrefixes |= INST_PRE_SS;
				prefixes_track_unused(ps, index, PFXIDX_SEG);
				break;
			case PREFIX_DS:
				ps->decodedPrefixes |= INST_PRE_DS;
				prefixes_track_unused(ps, index, PFXIDX_SEG);
				break;
			case PREFIX_ES:
				ps->decodedPrefixes |= INST_PRE_ES;
				prefixes_track_unused(ps, index, PFXIDX_SEG);
				break;
			case PREFIX_FS:
				ps->decodedPrefixes |= INST_PRE_FS;
				prefixes_track_unused(ps, index, PFXIDX_SEG);
				break;
			case PREFIX_GS:
				ps->decodedPrefixes |= INST_PRE_GS;
				prefixes_track_unused(ps, index, PFXIDX_SEG);
				break;

			/* Op Size type: */
			case PREFIX_OP_SIZE:
				ps->decodedPrefixes |= INST_PRE_OP_SIZE;
				prefixes_track_unused(ps, index, PFXIDX_OP_SIZE);
				break;

			/* Addr Size type: */
			case PREFIX_ADDR_SIZE:
				ps->decodedPrefixes |= INST_PRE_ADDR_SIZE;
				prefixes_track_unused(ps, index, PFXIDX_ADRS);
				break;

			/* Non-prefix byte now, so break 2. */
			default: done = TRUE; break;
		}
		if (done) break;
	}

	/* 2 Bytes VEX: */
	if ((codeLen >= 2) &&
		(*code == PREFIX_VEX2b) &&
		((code - ps->start) <= INST_MAXIMUM_SIZE - 2)) {
		/*
		 * In 32 bits the second byte has to be in the special range of Mod=11.
		 * Otherwise it might be a normal LDS instruction.
		 */
		if ((dt == Decode64Bits) || (*(code + 1) >= INST_DIVIDED_MODRM)) {
			ps->vexPos = code + 1;
			ps->decodedPrefixes |= INST_PRE_VEX;
			ps->prefixExtType = PET_VEX2BYTES;

			/*
			 * VEX 1 byte bits:
			 * |7-6--3-2-10|
			 * |R|vvvv|L|pp|
			 * |-----------|
			 */

			/* -- Convert from VEX prefix to VREX flags -- */
			vex = *ps->vexPos;
			if (~vex & 0x80 && dt == Decode64Bits) ps->vrex |= PREFIX_EX_R; /* Convert VEX.R. */
			if (vex & 4) ps->vrex |= PREFIX_EX_L; /* Convert VEX.L. */

			code += 2;
		}
	}

	/* 3 Bytes VEX: */
	if ((codeLen >= 3) &&
		(*code == PREFIX_VEX3b) &&
		((code - ps->start) <= INST_MAXIMUM_SIZE - 3) &&
		(~ps->decodedPrefixes & INST_PRE_VEX)) {
		/*
		 * In 32 bits the second byte has to be in the special range of Mod=11.
		 * Otherwise it might be a normal LES instruction.
		 * And we don't care now about the 3rd byte.
		 */
		if ((dt == Decode64Bits) || (*(code + 1) >= INST_DIVIDED_MODRM)) {
			ps->vexPos = code + 1;
			ps->decodedPrefixes |= INST_PRE_VEX;
			ps->prefixExtType = PET_VEX3BYTES;

			/*
			 * VEX first and second bytes:
			 * |7-6-5-4----0| |7-6--3-2-10|
			 * |R|X|B|m-mmmm| |W|vvvv|L|pp|
			 * |------------| |-----------|
			 */

			/* -- Convert from VEX prefix to VREX flags -- */
			vex = *ps->vexPos;
			ps->vrex |= ((~vex >> 5) & 0x7); /* Shift and invert VEX.R/X/B to their place */
			vex = *(ps->vexPos + 1);
			if (vex & 4) ps->vrex |= PREFIX_EX_L; /* Convert VEX.L. */
			if (vex & 0x80) ps->vrex |= PREFIX_EX_W; /* Convert VEX.W. */

			/* Clear some flags if the mode isn't 64 bits. */
			if (dt != Decode64Bits) ps->vrex &= ~(PREFIX_EX_B | PREFIX_EX_X | PREFIX_EX_R | PREFIX_EX_W);

			code += 3;
		}
	}

	/*
	 * Save last byte scanned address, so the decoder could keep on scanning from this point and on and on and on.
	 * In addition the decoder is able to know that the last byte could lead to MMX/SSE instructions (preceding REX if exists).
	 */
	ps->last = code; /* ps->last points to an opcode byte. */
}

/*
 * For every memory-indirection operand we want to set its corresponding default segment.
 * If the segment is being overridden, we need to see whether we use it or not.
 * We will use it only if it's not the default one already.
 */
void prefixes_use_segment(_iflags defaultSeg, _PrefixState* ps, _DecodeType dt, _DInst* di)
{
	_iflags flags = 0;
	if (dt == Decode64Bits) flags = ps->decodedPrefixes & INST_PRE_SEGOVRD_MASK64;
	else flags = ps->decodedPrefixes & INST_PRE_SEGOVRD_MASK;

	if ((flags == 0) || (flags == defaultSeg)) {
		flags = defaultSeg;
		di->segment |= SEGMENT_DEFAULT;
	} else if (flags != defaultSeg) {
		/* Use it only if it's non-default segment. */
		ps->usedPrefixes |= flags;
	}

	/* ASSERT: R_XX must be below 128. */
	switch (flags)
	{
		case INST_PRE_ES: di->segment |= R_ES; break;
		case INST_PRE_CS: di->segment |= R_CS; break;
		case INST_PRE_SS: di->segment |= R_SS; break;
		case INST_PRE_DS: di->segment |= R_DS; break;
		case INST_PRE_FS: di->segment |= R_FS; break;
		case INST_PRE_GS: di->segment |= R_GS; break;
	}

	/* If it's one of the CS,SS,DS,ES and the mode is 64 bits, set segment it to none, since it's ignored.
*/ + if ((dt == Decode64Bits) && (flags & INST_PRE_SEGOVRD_MASK32)) di->segment = R_NONE; +} diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.h new file mode 100644 index 00000000..f205e697 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/prefix.h @@ -0,0 +1,64 @@ +/* +prefix.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#ifndef PREFIX_H +#define PREFIX_H + +#include "config.h" +#include "decoder.h" + + +/* Specifies the type of the extension prefix, such as: REX, 2 bytes VEX, 3 bytes VEX. */ +typedef enum {PET_NONE = 0, PET_REX, PET_VEX2BYTES, PET_VEX3BYTES} _PrefixExtType; + +/* Specifies an index into a table of prefixes by their type. */ +typedef enum {PFXIDX_NONE = -1, PFXIDX_REX, PFXIDX_LOREP, PFXIDX_SEG, PFXIDX_OP_SIZE, PFXIDX_ADRS, PFXIDX_MAX} _PrefixIndexer; + +/* +* This holds the prefixes state for the current instruction we decode. +* decodedPrefixes includes all specific prefixes that the instruction got. +* start is a pointer to the first prefix to take into account. +* last is a pointer to the last byte we scanned. +* Other pointers are used to keep track of prefixes positions and help us know if they appeared already and where. +*/ +typedef struct { + _iflags decodedPrefixes, usedPrefixes; + const uint8_t *start, *last, *vexPos, *rexPos; + _PrefixExtType prefixExtType; + uint16_t unusedPrefixesMask; + /* Indicates whether the operand size prefix (0x66) was used as a mandatory prefix. */ + int isOpSizeMandatory; + /* If VEX prefix is used, store the VEX.vvvv field. */ + unsigned int vexV; + /* The fields B/X/R/W/L of REX and VEX are stored together in this byte. */ + unsigned int vrex; + + /* !! Make sure pfxIndexer is LAST! Otherwise memset won't work well with it. !! 
*/ + + /* Holds the offset to the prefix byte by its type. */ + int pfxIndexer[PFXIDX_MAX]; +} _PrefixState; + +/* +* Intel supports 6 types of prefixes, whereas AMD supports 5 types (lock is seperated from rep/nz). +* REX is the fifth prefix type, this time I'm based on AMD64. +* VEX is the 6th, though it can't be repeated. +*/ +#define MAX_PREFIXES (5) + +int prefixes_is_valid(unsigned int ch, _DecodeType dt); +void prefixes_ignore(_PrefixState* ps, _PrefixIndexer pi); +void prefixes_ignore_all(_PrefixState* ps); +uint16_t prefixes_set_unused_mask(_PrefixState* ps); +void prefixes_decode(const uint8_t* code, int codeLen, _PrefixState* ps, _DecodeType dt); +void prefixes_use_segment(_iflags defaultSeg, _PrefixState* ps, _DecodeType dt, _DInst* di); + +#endif /* PREFIX_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.c new file mode 100644 index 00000000..ba094e52 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.c @@ -0,0 +1,172 @@ +/* +textdefs.c + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. 
*/


#include "textdefs.h"

#ifndef DISTORM_LIGHT

static uint8_t Nibble2ChrTable[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
/* NOTE: by convention this macro reads a local variable named 't' declared by each caller. */
#define NIBBLE_TO_CHR Nibble2ChrTable[t]

/* Append the byte 'x' to 's' as exactly two lowercase hex digits (zero padded). */
void _FASTCALL_ str_hex_b(_WString* s, unsigned int x)
{
	/*
	 * def prebuilt():
	 *  s = ""
	 *  for i in xrange(256):
	 *   if ((i % 0x10) == 0):
	 *    s += "\r\n"
	 *   s += "\"%02x\", " % (i)
	 *  return s
	 */
	static int8_t TextBTable[256][3] = {
		"00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0a", "0b", "0c", "0d", "0e", "0f",
		"10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "1a", "1b", "1c", "1d", "1e", "1f",
		"20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "2a", "2b", "2c", "2d", "2e", "2f",
		"30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3a", "3b", "3c", "3d", "3e", "3f",
		"40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "4a", "4b", "4c", "4d", "4e", "4f",
		"50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "5a", "5b", "5c", "5d", "5e", "5f",
		"60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6a", "6b", "6c", "6d", "6e", "6f",
		"70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "7a", "7b", "7c", "7d", "7e", "7f",
		"80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "8a", "8b", "8c", "8d", "8e", "8f",
		"90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9a", "9b", "9c", "9d", "9e", "9f",
		"a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "aa", "ab", "ac", "ad", "ae", "af",
		"b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", "b9", "ba", "bb", "bc", "bd", "be", "bf",
		"c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "ca", "cb", "cc", "cd", "ce", "cf",
		"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "da", "db", "dc", "dd", "de", "df",
		"e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8", "e9", "ea", "eb", "ec", "ed", "ee", "ef",
		"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "fa", "fb", "fc", "fd", "fe", "ff"
	};

	/*
	 * Fixed length of 3 including null terminate character.
	 */
	memcpy(&s->p[s->length], TextBTable[x & 255], 3);
	s->length += 2;
}

/* Append the byte 'x' to 's' as "0x.." with no zero padding (e.g. "0x5", "0x1f"). */
void _FASTCALL_ str_code_hb(_WString* s, unsigned int x)
{
	static int8_t TextHBTable[256][5] = {
		/*
		 * def prebuilt():
		 *  s = ""
		 *  for i in xrange(256):
		 *   if ((i % 0x10) == 0):
		 *    s += "\r\n"
		 *   s += "\"0x%x\", " % (i)
		 *  return s
		 */
		"0x0", "0x1", "0x2", "0x3", "0x4", "0x5", "0x6", "0x7", "0x8", "0x9", "0xa", "0xb", "0xc", "0xd", "0xe", "0xf",
		"0x10", "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18", "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f",
		"0x20", "0x21", "0x22", "0x23", "0x24", "0x25", "0x26", "0x27", "0x28", "0x29", "0x2a", "0x2b", "0x2c", "0x2d", "0x2e", "0x2f",
		"0x30", "0x31", "0x32", "0x33", "0x34", "0x35", "0x36", "0x37", "0x38", "0x39", "0x3a", "0x3b", "0x3c", "0x3d", "0x3e", "0x3f",
		"0x40", "0x41", "0x42", "0x43", "0x44", "0x45", "0x46", "0x47", "0x48", "0x49", "0x4a", "0x4b", "0x4c", "0x4d", "0x4e", "0x4f",
		"0x50", "0x51", "0x52", "0x53", "0x54", "0x55", "0x56", "0x57", "0x58", "0x59", "0x5a", "0x5b", "0x5c", "0x5d", "0x5e", "0x5f",
		"0x60", "0x61", "0x62", "0x63", "0x64", "0x65", "0x66", "0x67", "0x68", "0x69", "0x6a", "0x6b", "0x6c", "0x6d", "0x6e", "0x6f",
		"0x70", "0x71", "0x72", "0x73", "0x74", "0x75", "0x76", "0x77", "0x78", "0x79", "0x7a", "0x7b", "0x7c", "0x7d", "0x7e", "0x7f",
		"0x80", "0x81", "0x82", "0x83", "0x84", "0x85", "0x86", "0x87", "0x88", "0x89", "0x8a", "0x8b", "0x8c", "0x8d", "0x8e", "0x8f",
		"0x90", "0x91", "0x92", "0x93", "0x94", "0x95", "0x96", "0x97", "0x98", "0x99", "0x9a", "0x9b", "0x9c", "0x9d", "0x9e", "0x9f",
		"0xa0", "0xa1", "0xa2", "0xa3", "0xa4", "0xa5", "0xa6", "0xa7", "0xa8", "0xa9", "0xaa", "0xab", "0xac", "0xad", "0xae", "0xaf",
		"0xb0", "0xb1", "0xb2", "0xb3", "0xb4", "0xb5", "0xb6", "0xb7", "0xb8", "0xb9", "0xba", "0xbb", "0xbc", "0xbd", "0xbe", "0xbf",
		"0xc0", "0xc1", "0xc2", "0xc3", "0xc4", "0xc5", "0xc6", "0xc7", "0xc8", "0xc9", "0xca", "0xcb", "0xcc", "0xcd", "0xce", "0xcf",
		"0xd0", "0xd1", "0xd2", "0xd3", "0xd4", "0xd5", "0xd6", "0xd7", "0xd8", "0xd9", "0xda", "0xdb", "0xdc", "0xdd", "0xde", "0xdf",
		"0xe0", "0xe1", "0xe2", "0xe3", "0xe4", "0xe5", "0xe6", "0xe7", "0xe8", "0xe9", "0xea", "0xeb", "0xec", "0xed", "0xee", "0xef",
		"0xf0", "0xf1", "0xf2", "0xf3", "0xf4", "0xf5", "0xf6", "0xf7", "0xf8", "0xf9", "0xfa", "0xfb", "0xfc", "0xfd", "0xfe", "0xff"
	};

	if (x < 0x10) { /* < 0x10 has a fixed length of 4 including null terminate. */
		memcpy(&s->p[s->length], TextHBTable[x & 255], 4);
		s->length += 3;
	} else { /* >= 0x10 has a fixed length of 5 including null terminate. */
		memcpy(&s->p[s->length], TextHBTable[x & 255], 5);
		s->length += 4;
	}
}

/* Append the 32 bits value 'x' to 's' as "0x..." with leading zero nibbles skipped. */
void _FASTCALL_ str_code_hdw(_WString* s, uint32_t x)
{
	int8_t* buf;
	int i = 0, shift = 0;
	unsigned int t = 0;

	buf = (int8_t*)&s->p[s->length];

	buf[0] = '0';
	buf[1] = 'x';
	buf += 2;

	/* Top 7 nibbles: 'i | t' is nonzero once any nibble was emitted, so leading zeros are skipped. */
	for (shift = 28; shift != 0; shift -= 4) {
		t = (x >> shift) & 0xf;
		if (i | t) buf[i++] = NIBBLE_TO_CHR;
	}
	/* The lowest nibble is always emitted, so 0 prints as "0x0". */
	t = x & 0xf;
	buf[i++] = NIBBLE_TO_CHR;

	s->length += i + 2;
	buf[i] = '\0';
}

/* Append a 64 bits little-endian value (given as 8 raw bytes) to 's' as "0x...", leading zeros skipped. */
void _FASTCALL_ str_code_hqw(_WString* s, uint8_t src[8])
{
	int8_t* buf;
	int i = 0, shift = 0;
	/* High dword first (big-endian text output from little-endian bytes). */
	uint32_t x = RULONG(&src[sizeof(int32_t)]);
	int t;

	buf = (int8_t*)&s->p[s->length];
	buf[0] = '0';
	buf[1] = 'x';
	buf += 2;

	/* All 8 nibbles of the high dword (shift runs 28 down to 0 inclusive). */
	for (shift = 28; shift != -4; shift -= 4) {
		t = (x >> shift) & 0xf;
		if (i | t) buf[i++] = NIBBLE_TO_CHR;
	}

	x = RULONG(src);
	for (shift = 28; shift != 0; shift -= 4) {
		t = (x >> shift) & 0xf;
		if (i | t) buf[i++] = NIBBLE_TO_CHR;
	}
	/* Lowest nibble always emitted, so 0 prints as "0x0". */
	t = x & 0xf;
	buf[i++] = NIBBLE_TO_CHR;

	s->length += i + 2;
	buf[i] = '\0';
}

#ifdef SUPPORT_64BIT_OFFSET
/* Append a 64 bits offset to 's' as "0x...", leading zeros skipped. */
void _FASTCALL_ str_off64(_WString* s, OFFSET_INTEGER x)
{
	int8_t* buf;
	int i = 0, shift = 0;
	OFFSET_INTEGER t = 0;

	buf = (int8_t*)&s->p[s->length];

	buf[0] = '0';
	buf[1] = 'x';
	buf += 2;

	for (shift = 60; shift != 0; shift -= 4) {
		t = (x >> shift) & 0xf;
		if (i | t) buf[i++] = NIBBLE_TO_CHR;
	}
	t = x & 0xf;
	buf[i++] = NIBBLE_TO_CHR;

	s->length += i + 2;
	buf[i] = '\0';
}
#endif /* SUPPORT_64BIT_OFFSET */

#endif /* DISTORM_LIGHT */
diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.h
new file mode 100644
index 00000000..a923626f
--- /dev/null
+++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/textdefs.h
@@ -0,0 +1,57 @@
/*
textdefs.h

diStorm3 - Powerful disassembler for X86/AMD64
http://ragestorm.net/distorm/
distorm at gmail dot com
Copyright (C) 2003-2018 Gil Dabah
This library is licensed under the BSD license. See the file COPYING.
*/


#ifndef TEXTDEFS_H
#define TEXTDEFS_H

#include "config.h"
#include "wstring.h"

#ifndef DISTORM_LIGHT

#define PLUS_DISP_CHR '+'
#define MINUS_DISP_CHR '-'
#define OPEN_CHR '['
#define CLOSE_CHR ']'
#define SP_CHR ' '
#define SEG_OFF_CHR ':'

/*
Naming Convention:

* get - returns a pointer to a string.
* str - concatenates to string.

* hex - means the function is used for hex dump (number is padded to required size) - Little Endian output.
* code - means the function is used for disassembled instruction - Big Endian output.
* off - means the function is used for 64bit offset - Big Endian output.

* h - '0x' in front of the string.

* b - byte
* dw - double word (can be used for word also)
* qw - quad word

* all numbers are in HEX.
+*/ + +void _FASTCALL_ str_hex_b(_WString* s, unsigned int x); +void _FASTCALL_ str_code_hb(_WString* s, unsigned int x); +void _FASTCALL_ str_code_hdw(_WString* s, uint32_t x); +void _FASTCALL_ str_code_hqw(_WString* s, uint8_t src[8]); + +#ifdef SUPPORT_64BIT_OFFSET +void _FASTCALL_ str_off64(_WString* s, OFFSET_INTEGER x); +#endif + +#endif /* DISTORM_LIGHT */ + +#endif /* TEXTDEFS_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.c b/module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.c new file mode 100644 index 00000000..083200b4 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.c @@ -0,0 +1,47 @@ +/* +wstring.c + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#include "wstring.h" + +#ifndef DISTORM_LIGHT + +void strclear_WS(_WString* s) +{ + s->p[0] = '\0'; + s->length = 0; +} + +void chrcat_WS(_WString* s, uint8_t ch) +{ + s->p[s->length] = ch; + s->p[s->length + 1] = '\0'; + s->length += 1; +} + +void strcpylen_WS(_WString* s, const int8_t* buf, unsigned int len) +{ + s->length = len; + memcpy((int8_t*)s->p, buf, len + 1); +} + +void strcatlen_WS(_WString* s, const int8_t* buf, unsigned int len) +{ + memcpy((int8_t*)&s->p[s->length], buf, len + 1); + s->length += len; +} + +void strcat_WS(_WString* s, const _WString* s2) +{ + memcpy((int8_t*)&s->p[s->length], s2->p, s2->length + 1); + s->length += s2->length; +} + +#endif /* DISTORM_LIGHT */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.h new file mode 100644 index 00000000..6b3a2a3f --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/wstring.h @@ -0,0 +1,35 @@ +/* +wstring.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 
2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#ifndef WSTRING_H +#define WSTRING_H + +#include "config.h" + +#ifndef DISTORM_LIGHT + +void strclear_WS(_WString* s); +void chrcat_WS(_WString* s, uint8_t ch); +void strcpylen_WS(_WString* s, const int8_t* buf, unsigned int len); +void strcatlen_WS(_WString* s, const int8_t* buf, unsigned int len); +void strcat_WS(_WString* s, const _WString* s2); + +/* +* Warning, this macro should be used only when the compiler knows the size of string in advance! +* This macro is used in order to spare the call to strlen when the strings are known already. +* Note: sizeof includes NULL terminated character. +*/ +#define strcat_WSN(s, t) strcatlen_WS((s), ((const int8_t*)t), sizeof((t))-1) +#define strcpy_WSN(s, t) strcpylen_WS((s), ((const int8_t*)t), sizeof((t))-1) + +#endif /* DISTORM_LIGHT */ + +#endif /* WSTRING_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/distorm/x86defs.h b/module/src/main/cpp/whale/src/dbi/x86/distorm/x86defs.h new file mode 100644 index 00000000..ca3a3adf --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/distorm/x86defs.h @@ -0,0 +1,82 @@ +/* +x86defs.h + +diStorm3 - Powerful disassembler for X86/AMD64 +http://ragestorm.net/distorm/ +distorm at gmail dot com +Copyright (C) 2003-2018 Gil Dabah +This library is licensed under the BSD license. See the file COPYING. +*/ + + +#ifndef X86DEFS_H +#define X86DEFS_H + + +#define SEG_REGS_MAX (6) +#define CREGS_MAX (9) +#define DREGS_MAX (8) + +/* Maximum instruction size, including prefixes */ +#define INST_MAXIMUM_SIZE (15) + +/* Maximum range of imm8 (comparison type) of special SSE CMP instructions. */ +#define INST_CMP_MAX_RANGE (8) + +/* Maximum range of imm8 (comparison type) of special AVX VCMP instructions. */ +#define INST_VCMP_MAX_RANGE (32) + +/* Wait instruction byte code. */ +#define INST_WAIT_INDEX (0x9b) + +/* Lea instruction byte code. 
*/ +#define INST_LEA_INDEX (0x8d) + +/* NOP/XCHG instruction byte code. */ +#define INST_NOP_INDEX (0x90) + +/* ARPL/MOVSXD instruction byte code. */ +#define INST_ARPL_INDEX (0x63) + +/* + * Minimal MODR/M value of divided instructions. + * It's 0xc0, two MSBs set, which indicates a general purpose register is used too. + */ +#define INST_DIVIDED_MODRM (0xc0) + +/* This is the escape byte value used for 3DNow! instructions. */ +#define _3DNOW_ESCAPE_BYTE (0x0f) + +#define PREFIX_LOCK (0xf0) +#define PREFIX_REPNZ (0xf2) +#define PREFIX_REP (0xf3) +#define PREFIX_CS (0x2e) +#define PREFIX_SS (0x36) +#define PREFIX_DS (0x3e) +#define PREFIX_ES (0x26) +#define PREFIX_FS (0x64) +#define PREFIX_GS (0x65) +#define PREFIX_OP_SIZE (0x66) +#define PREFIX_ADDR_SIZE (0x67) +#define PREFIX_VEX2b (0xc5) +#define PREFIX_VEX3b (0xc4) + +/* REX prefix value range, 64 bits mode decoding only. */ +#define PREFIX_REX_LOW (0x40) +#define PREFIX_REX_HI (0x4f) +/* In order to use the extended GPR's we have to add 8 to the Modr/M info values. */ +#define EX_GPR_BASE (8) + +/* Mask for REX and VEX features: */ +/* Base */ +#define PREFIX_EX_B (1) +/* Index */ +#define PREFIX_EX_X (2) +/* Register */ +#define PREFIX_EX_R (4) +/* Operand Width */ +#define PREFIX_EX_W (8) +/* Vector Lengh */ +#define PREFIX_EX_L (0x10) + +#endif /* X86DEFS_H */ diff --git a/module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.cc b/module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.cc new file mode 100644 index 00000000..b66be593 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.cc @@ -0,0 +1,71 @@ +#include +#include +#include "dbi/x86/inline_hook_x86.h" +#include "dbi/x86/instruction_rewriter_x86.h" +#include "assembler/x86/assembler_x86.h" +#include "platform/memory.h" + +#define __ masm. 
+ +namespace whale { +namespace x86 { + +void X86InlineHook::StartHook() { + + DCHECK(address_ != 0 && replace_ != 0); + X86Assembler masm; + __ movl(EDX, Immediate(replace_)); + __ jmp(EDX); + masm.FinalizeCode(); + + size_t backup_size = masm.GetBuffer()->Size(); + size_t code_aligned_size = 0; + do { + u1 *code = reinterpret_cast(address_) + code_aligned_size; + u1 size = Decode(code, UINT8_MAX, 0).size; + code_aligned_size += size; + } while (code_aligned_size < backup_size); + + backup_size = code_aligned_size; + + backup_code_ = new BackupCode(GetTarget(), backup_size); + + if (backup_ != nullptr) { + intptr_t tail = address_ + backup_size; + intptr_t trampoline = BuildTrampoline(static_cast(tail)); + *backup_ = trampoline; + } + + ScopedMemoryPatch patch(GetTarget(), masm.GetBuffer()->contents(), + masm.GetBuffer()->Size()); +} + +intptr_t X86InlineHook::BuildTrampoline(u4 tail) { + X86Assembler masm; + X86InstructionRewriter rewriter(&masm, backup_code_, GetTarget(), tail); + rewriter.Rewrite(); + + __ movl(EDX, Immediate(tail)); + __ jmp(EDX); + masm.FinalizeCode(); + size_t size = masm.GetBuffer()->Size(); + + trampoline_addr_ = mmap(nullptr, GetPageSize(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + memcpy(trampoline_addr_, masm.GetBuffer()->contents(), size); + mprotect(trampoline_addr_, GetPageSize(), PROT_READ | PROT_EXEC); + return reinterpret_cast(trampoline_addr_); +} + + +void X86InlineHook::StopHook() { + size_t code_size = backup_code_->GetSizeInBytes(); + void *insns = backup_code_->GetInstructions(); + ScopedMemoryPatch patch(GetTarget(), insns, code_size); + memcpy(GetTarget(), insns, code_size); + if (trampoline_addr_ != nullptr) { + munmap(trampoline_addr_, GetPageSize()); + } +} + +} // namespace x86 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.h b/module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.h new file mode 100644 index 00000000..11753358 --- /dev/null +++ 
b/module/src/main/cpp/whale/src/dbi/x86/inline_hook_x86.h @@ -0,0 +1,35 @@ +#ifndef WHALE_ARCH_X86_INLINE_HOOK_X86_H_ +#define WHALE_ARCH_X86_INLINE_HOOK_X86_H_ + +#include "dbi/hook_common.h" +#include "base/primitive_types.h" +#include "dbi/backup_code.h" + +namespace whale { +namespace x86 { + +class X86InlineHook : public InlineHook { + public: + X86InlineHook(intptr_t address, intptr_t replace, intptr_t *backup) + : InlineHook(address, replace, backup), backup_code_(nullptr), + trampoline_addr_(nullptr) {} + + ~X86InlineHook() override { + delete backup_code_; + } + + void StartHook() override; + + void StopHook() override; + + private: + BackupCode *backup_code_; + void *trampoline_addr_; + + intptr_t BuildTrampoline(u4 tail); +}; + +} // namespace x86 +} // namespace whale + +#endif // WHALE_ARCH_X86_INLINE_HOOK_X86_H_ diff --git a/module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.cc b/module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.cc new file mode 100644 index 00000000..ca9ec7e6 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.cc @@ -0,0 +1,119 @@ +#include "dbi/x86/distorm/distorm.h" +#include +#include "dbi/x86/instruction_rewriter_x86.h" +#include "instruction_rewriter_x86.h" + +#define __ masm_-> + +namespace whale { +namespace x86 { + +void X86InstructionRewriter::Rewrite() { + __ addl(ESP, Immediate(4)); + u1 *insns = code_->GetInstructions(); + int pos = 0; + size_t count = code_->GetCount(); + u4 pc = cfg_pc_; + while (pos < count) { + u1 *current = insns + pos; + _DInst insn = Decode(current, count - pos, 0); + pc += insn.size; + switch (insn.opcode) { + case I_CALL: + Rewrite_Call(current, pc, insn); + break; + case I_JMP: + Rewrite_Jmp(current, pc, insn); + break; + case I_JECXZ: + case I_JCXZ: + Rewrite_JRCXZ(current, pc, insn); + break; + default: + EmitCode(current, insn.size); + break; + } + pos += insn.size; + } +} + +void X86InstructionRewriter::Rewrite_Call(u1 *current, u4 pc, 
_DInst insn) { + bool rewritten = false; + if (insn.ops[0].type == O_PC) { + u4 pcrel_address = pc + insn.imm.dword; + Register reg; + if (IsGetPCThunkToRegister(pcrel_address, ®)) { + __ movl(reg, Immediate(pc)); + rewritten = true; + } else if (pcrel_address >= tail_pc_) { + __ pushl(Immediate(pc)); + __ movl(EDX, Immediate(pcrel_address)); + __ jmp(EDX); + rewritten = true; + } + } + if (!rewritten) { + EmitCode(current, insn.size); + } +} + +void X86InstructionRewriter::Rewrite_Jmp(u1 *current, u4 pc, _DInst insn) { + bool rewritten = false; + if (insn.ops[0].type == O_PC) { + u4 pcrel_address = pc + insn.imm.dword; + if (pcrel_address >= tail_pc_) { + __ movl(EDX, Immediate(pcrel_address)); + __ jmp(EDX); + rewritten = true; + } + } + if (!rewritten) { + EmitCode(current, insn.size); + } +} + +void X86InstructionRewriter::Rewrite_JRCXZ(u1 *current, u4 pc, _DInst insn) { + bool rewritten = false; + u4 pcrel_address = pc + insn.imm.dword; + if (pcrel_address >= tail_pc_) { + NearLabel true_label, false_label; + + __ jecxz(&true_label); + __ jmp(&false_label); + + __ Bind(&true_label); + __ movl(EDX, Immediate(pcrel_address)); + __ jmp(EDX); + + __ Bind(&false_label); + rewritten = true; + } + if (!rewritten) { + EmitCode(current, insn.size); + } +} + +/** + * Find the following scheme: + * + * _x86_get_pc_thunk_bx: + * mov ebx, [esp+0] + * ret + * + */ +bool X86InstructionRewriter::IsGetPCThunkToRegister(u4 address, Register *reg) { + u1 *current = reinterpret_cast(address); + _DInst insn0 = Decode(current, UINT8_MAX, 0); + if (insn0.opcode != I_MOV || insn0.ops[0].type != O_REG || insn0.ops[1].type != O_SMEM) { + return false; + } + _DInst insn1 = Decode(current + insn0.size, UINT8_MAX, 0); + if (insn1.opcode != I_RET) { + return false; + } + *reg = static_cast(insn0.ops[0].index % 16); + return true; +} + +} // namespace x86 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.h 
b/module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.h new file mode 100644 index 00000000..7cecb1ed --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/instruction_rewriter_x86.h @@ -0,0 +1,66 @@ +#ifndef ARCH_X86_REWRITER_X86_H_ +#define ARCH_X86_REWRITER_X86_H_ + +#include +#include "assembler/x86/assembler_x86.h" +#include "base/primitive_types.h" +#include "dbi/backup_code.h" +#include "dbi/instruction_rewriter.h" +#include "dbi/instruction_set.h" +#include "base/macros.h" + +namespace whale { +namespace x86 { + +class X86InstructionRewriter : public InstructionReWriter { + public: + X86InstructionRewriter(whale::x86::X86Assembler *masm, BackupCode *code, + u4 origin_pc, u4 tail_pc) + : masm_(masm), code_(code), cfg_pc_(origin_pc), tail_pc_(tail_pc) {} + + ~X86InstructionRewriter() = default; + + const InstructionSet GetISA() override { + return InstructionSet::kX86; + } + + void Rewrite() override; + + u1 *GetStartAddress() override { + return masm_->GetBuffer()->contents(); + } + + size_t GetCodeSize() override { + return masm_->GetBuffer()->Size(); + } + + void EmitCode(u1 *start, size_t size) { + for (int i = 0; i < size; ++i) { + AssemblerBuffer::EnsureCapacity ensured(masm_->GetBuffer()); + masm_->GetBuffer()->Emit(start[i]); + } + } + + private: + const u4 cfg_pc_; + const u4 tail_pc_; + whale::x86::X86Assembler *masm_; + BackupCode *code_; + + void Rewrite_Call(u1 *current, u4 pc, _DInst insn); + + bool IsGetPCThunkToRegister(u4 address, Register *reg); + + void Rewrite_Jmp(u1 *current, u4 pc, _DInst insn); + + void Rewrite_JRCXZ(u1 *current, u4 pc, _DInst insn); +}; + + +} // namespace arm64 +} // namespace whale + + +#endif // ARCH_X86_REWRITER_X86_H_ + + diff --git a/module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.cc b/module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.cc new file mode 100644 index 00000000..2c2d8f1c --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.cc @@ -0,0 
+1,28 @@ +#include "dbi/x86/intercept_syscall_x86.h" +#include "base/primitive_types.h" + +namespace whale { +namespace x86 { + + +void X86InterceptSysCallHook::FindSysCalls(uintptr_t start_addr, uintptr_t end_addr) { + u1 *start = reinterpret_cast(start_addr); + u1 *end = reinterpret_cast(end_addr) - 1; + while (start < end) { + // int 80h + if (*start == 0xcd & *(++start) == 0x80) { + // eax: sysnum + // ebx: arg0 + // ecx: arg1 + // edx: arg2 + // esi: arg3 + // edi: arg4 + // ebp: arg5 + } + } + +} + + +} // namespace x86 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.h b/module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.h new file mode 100644 index 00000000..b49ec5c4 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86/intercept_syscall_x86.h @@ -0,0 +1,20 @@ +#ifndef WHALE_DBI_X86_INTERCEPT_SYSCALL_HOOK_H_ +#define WHALE_DBI_X86_INTERCEPT_SYSCALL_HOOK_H_ + +#include "dbi/hook_common.h" + +namespace whale { +namespace x86 { + +class X86InterceptSysCallHook : public InterceptSysCallHook { + public: + X86InterceptSysCallHook(MemoryRangeCallback callback) : InterceptSysCallHook(callback) {} + + protected: + void FindSysCalls(uintptr_t start, uintptr_t end); +}; + +} // namespace x86 +} // namespace whale + +#endif // WHALE_DBI_X86_INTERCEPT_SYSCALL_HOOK_H_ diff --git a/module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.cc b/module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.cc new file mode 100644 index 00000000..42286f4a --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.cc @@ -0,0 +1,73 @@ +#include +#include +#include +#include "assembler/x86_64/assembler_x86_64.h" +#include "dbi/x86_64/inline_hook_x86_64.h" +#include "dbi/x86_64/instruction_rewriter_x86_64.h" +#include "dbi/x86/distorm/distorm.h" + +#define __ masm. 
+ +namespace whale { +namespace x86_64 { + + +void X86_64InlineHook::StartHook() { + CHECK(address_ != 0 && replace_ != 0); + X86_64Assembler masm; + + __ movq(RAX, Immediate(replace_)); + __ jmp(RAX); + masm.FinalizeCode(); + + size_t backup_size = masm.GetBuffer()->Size(); + size_t code_aligned_size = 0; + do { + u1 *code = reinterpret_cast(address_) + code_aligned_size; + u1 size = Decode(code, UINT8_MAX, 1).size; + code_aligned_size += size; + } while (code_aligned_size < backup_size); + + backup_size = code_aligned_size; + backup_code_ = new BackupCode(GetTarget(), backup_size); + + if (backup_ != nullptr) { + intptr_t tail = address_ + backup_size; + intptr_t trampoline = BuildTrampoline(static_cast(tail)); + *backup_ = trampoline; + } + + ScopedMemoryPatch patch(GetTarget(), masm.GetBuffer()->contents(), + masm.GetBuffer()->Size()); +} + +intptr_t X86_64InlineHook::BuildTrampoline(u8 tail) { + X86_64Assembler masm; + X86_64InstructionRewriter rewriter(&masm, backup_code_, GetTarget(), tail); + rewriter.Rewrite(); + + __ movq(R12, Immediate(tail)); + __ jmp(R12); + + masm.FinalizeCode(); + + size_t size = masm.GetBuffer()->Size(); + trampoline_addr_ = mmap(nullptr, GetPageSize(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + memcpy(trampoline_addr_, masm.GetBuffer()->contents(), size); + mprotect(trampoline_addr_, GetPageSize(), PROT_READ | PROT_EXEC); + return reinterpret_cast(trampoline_addr_); +} + + +void X86_64InlineHook::StopHook() { + size_t code_size = backup_code_->GetSizeInBytes(); + void *insns = backup_code_->GetInstructions(); + ScopedMemoryPatch patch(GetTarget(), insns, code_size); + memcpy(GetTarget(), insns, code_size); + if (trampoline_addr_ != nullptr) { + munmap(trampoline_addr_, GetPageSize()); + } +} + +} // namespace x86 +} // namespace whale diff --git a/module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.h b/module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.h new file mode 100644 index 
00000000..87761984 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86_64/inline_hook_x86_64.h @@ -0,0 +1,35 @@ +#ifndef WHALE_ARCH_X86_64_INLINE_HOOK_X86_64_H_ +#define WHALE_ARCH_X86_64_INLINE_HOOK_X86_64_H_ + +#include "dbi/hook_common.h" +#include "base/primitive_types.h" +#include "dbi/backup_code.h" + +namespace whale { +namespace x86_64 { + +class X86_64InlineHook : public InlineHook { + public: + X86_64InlineHook(intptr_t address, intptr_t replace, intptr_t *backup) + : InlineHook(address, replace, backup), backup_code_(nullptr), + trampoline_addr_(nullptr) {} + + ~X86_64InlineHook() override { + delete backup_code_; + } + + void StartHook() override; + + void StopHook() override; + + private: + BackupCode *backup_code_; + void *trampoline_addr_; + + intptr_t BuildTrampoline(u8 tail); +}; + +} // namespace x86 +} // namespace whale + +#endif // WHALE_ARCH_X86_64_INLINE_HOOK_X86_64_H_ diff --git a/module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.cc b/module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.cc new file mode 100644 index 00000000..95190895 --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.cc @@ -0,0 +1,111 @@ +#include "dbi/x86/instruction_rewriter_x86.h" +#include "instruction_rewriter_x86_64.h" +#include "dbi/x86/distorm/distorm.h" +#include "dbi/x86/distorm/mnemonics.h" + +#define __ masm_-> + +namespace whale { +namespace x86_64 { + +constexpr static const unsigned kRIP_index = 74; + +void X86_64InstructionRewriter::Rewrite() { + u1 *instructions = code_->GetInstructions(); + int pos = 0; + size_t count = code_->GetCount(); + u8 pc = cfg_pc_; + while (pos < code_->GetCount()) { + u1 *current = instructions + pos; + _DInst insn = Decode(current, count - pos, 1); + pc += insn.size; + switch (insn.opcode) { + case I_MOV: + Rewrite_Mov(current, pc, insn); + break; + case I_CALL: + Rewrite_Call(current, pc, insn); + break; + case I_JMP: + Rewrite_Jmp(current, pc, 
insn); + break; + case I_JECXZ: + case I_JRCXZ: + Rewrite_JRCXZ(current, pc, insn); + break; + default: + EmitCode(current, insn.size); + break; + } + pos += insn.size; + } +} + +void X86_64InstructionRewriter::Rewrite_Mov(u1 *current, u8 pc, _DInst insn) { + _Operand op0 = insn.ops[0]; + _Operand op1 = insn.ops[1]; + + if (op0.type == O_REG && op1.type == O_SMEM && op1.index == kRIP_index) { + // mov rd, nword ptr [rip + disp] + int rd = insn.ops[0].index % 16; + __ movq(rd, Immediate(pc + insn.disp)); + __ movl(rd, Address(rd, 0)); + } else if (op0.type == O_SMEM && op1.type == O_IMM && op1.index == kRIP_index) { + // mov nword ptr [rip + disp], imm + __ pushq(RAX); + __ movq(RAX, Immediate(pc + insn.disp)); + if (op1.size <= 32) { + __ movl(Address(RAX, 0), Immediate(insn.imm.dword)); + } else { + __ movq(Address(RAX, 0), Immediate(insn.imm.qword)); + } + __ popq(RAX); + } else { + EmitCode(current, insn.size); + } +} + +void X86_64InstructionRewriter::Rewrite_Call(u1 *current, u8 pc, _DInst insn) { + _Operand op = insn.ops[0]; + if (op.type == O_PC) { + __ movq(R11, Immediate(pc + insn.imm.qword)); + __ call(R11); + } else { + EmitCode(current, insn.size); + } +} + +void X86_64InstructionRewriter::Rewrite_Jmp(u1 *current, u8 pc, _DInst insn) { + _Operand op = insn.ops[0]; + if (op.type == O_PC) { + __ movq(R11, Immediate(pc + insn.imm.qword)); + __ jmp(R11); + } else { + EmitCode(current, insn.size); + } +} + +void X86_64InstructionRewriter::Rewrite_JRCXZ(u1 *current, u8 pc, _DInst insn) { + bool rewritten = false; + u8 pcrel_address = pc + insn.imm.qword; + if (pcrel_address >= tail_pc_) { + NearLabel true_label, false_label; + + __ jrcxz(&true_label); + __ jmp(&false_label); + + __ Bind(&true_label); + __ movq(R11, Immediate(pcrel_address)); + __ jmp(R11); + + __ Bind(&false_label); + rewritten = true; + } + if (!rewritten) { + EmitCode(current, insn.size); + } +} + + +} // namespace x86 +} // namespace whale diff --git 
a/module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.h b/module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.h new file mode 100644 index 00000000..2a0fb01b --- /dev/null +++ b/module/src/main/cpp/whale/src/dbi/x86_64/instruction_rewriter_x86_64.h @@ -0,0 +1,66 @@ +#ifndef ARCH_X86_64_REWRITER_X86_64_H_ +#define ARCH_X86_64_REWRITER_X86_64_H_ + +#include +#include "assembler/x86_64/assembler_x86_64.h" +#include "base/primitive_types.h" +#include "dbi/backup_code.h" +#include "dbi/instruction_rewriter.h" +#include "dbi/instruction_set.h" +#include "base/macros.h" + +namespace whale { +namespace x86_64 { + +class X86_64InstructionRewriter : public InstructionReWriter { + public: + X86_64InstructionRewriter(X86_64Assembler *masm, BackupCode *code, + u8 origin_pc, u8 tail_pc) + : masm_(masm), code_(code), cfg_pc_(origin_pc), tail_pc_(tail_pc) {} + + ~X86_64InstructionRewriter() = default; + + const InstructionSet GetISA() override { + return InstructionSet::kX86_64; + } + + void Rewrite() override; + + u1 *GetStartAddress() override { + return masm_->GetBuffer()->contents(); + } + + size_t GetCodeSize() override { + return masm_->GetBuffer()->Size(); + } + + void EmitCode(u1 *start, size_t size) { + for (int i = 0; i < size; ++i) { + AssemblerBuffer::EnsureCapacity ensured(masm_->GetBuffer()); + masm_->GetBuffer()->Emit(start[i]); + } + } + + private: + const u8 cfg_pc_; + const u8 tail_pc_; + X86_64Assembler *masm_; + BackupCode *code_; + + void Rewrite_Mov(u1 *current, u8 pc, _DInst insn); + + void Rewrite_Call(u1 *current, u8 pc, _DInst insn); + + void Rewrite_Jmp(u1 *current, u8 pc, _DInst insn); + + void Rewrite_JRCXZ(u1 *current, u8 pc, _DInst insn); +}; + + +} // namespace arm64 +} // namespace whale + + +#endif // ARCH_X86_64_REWRITER_X86_64_H_ + + diff --git a/module/src/main/cpp/whale/src/interceptor.cc b/module/src/main/cpp/whale/src/interceptor.cc new file mode 100644 index 00000000..bd997ce6 --- /dev/null +++ 
b/module/src/main/cpp/whale/src/interceptor.cc @@ -0,0 +1,32 @@ +#include "interceptor.h" + +namespace whale { + +Interceptor *Interceptor::Instance() { + static Interceptor instance; + return &instance; +} + +void Interceptor::AddHook(std::unique_ptr &hook) { + hook->id_ = static_cast(hook_list_.size()); + hook->StartHook(); + hook_list_.push_back(std::move(hook)); +} + +void Interceptor::RemoveHook(int id) { + for (auto &entry : hook_list_) { + if (entry->id_ == id) { + hook_list_.remove(entry); + entry->StopHook(); + break; + } + } +} + +void Interceptor::TraverseHooks(std::function &)> visitor) { + for (auto &hook : hook_list_) { + visitor(hook); + } +} + +} // namespace whale diff --git a/module/src/main/cpp/whale/src/interceptor.h b/module/src/main/cpp/whale/src/interceptor.h new file mode 100644 index 00000000..cec9c53d --- /dev/null +++ b/module/src/main/cpp/whale/src/interceptor.h @@ -0,0 +1,32 @@ +#ifndef WHALE_CODE_INTERCEPTOR_H_ +#define WHALE_CODE_INTERCEPTOR_H_ + +#include +#include +#include "base/logging.h" +#include "dbi/instruction_set.h" +#include "dbi/hook_common.h" + +namespace whale { + +class Interceptor { + public: + static Interceptor *Instance(); + + void AddHook(std::unique_ptr &hook); + + void RemoveHook(int id); + + void RemoveHook(std::unique_ptr &hook) { + RemoveHook(hook->id_); + } + void TraverseHooks(std::function&)> visitor); + + private: + std::list> hook_list_; +}; + +} // namespace whale + +#endif // WHALE_CODE_INTERCEPTOR_H_ + diff --git a/module/src/main/cpp/whale/src/libffi/aarch64/ffi_arm64.c b/module/src/main/cpp/whale/src/libffi/aarch64/ffi_arm64.c new file mode 100644 index 00000000..2533f50f --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/aarch64/ffi_arm64.c @@ -0,0 +1,946 @@ +#if defined(__aarch64__) || defined(__arm64__) + +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#include +#include +#include +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 128-bit type. 
*/ +#if FFI_TYPE_DOUBLE != FFI_TYPE_LONGDOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +union _d +{ + UINT64 d; + UINT32 s[2]; +}; + +struct _v +{ + union _d d[2] __attribute__((aligned(16))); +}; + +struct call_context +{ + struct _v v[N_V_ARG_REG]; + UINT64 x[N_X_ARG_REG]; +}; + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include +#endif + +#else + +#if defined (__clang__) && defined (__APPLE__) +extern void sys_icache_invalidate (void *start, size_t len); +#endif + +static inline void +ffi_clear_cache (void *start, void *end) +{ +#if defined (__clang__) && defined (__APPLE__) + sys_icache_invalidate (start, (char *)end - (char *)start); +#elif defined (__GNUC__) + __builtin___clear_cache (start, end); +#else +#error "Missing builtin to flush instruction cache" +#endif +} + +#endif + +/* A subroutine of is_vfp_type. Given a structure type, return the type code + of the first non-structure element. Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of is_vfp_type. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. 
*/ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY may be allocated to the FP registers. This is both an + fp scalar type as well as an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is an fp scalar. + + Returns non-zero iff TY is an HFA. The result is the AARCH64_RET_* + constant for the type. */ + +static int +is_vfp_type (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. */ + candidate = ty->type; + switch (candidate) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + switch (candidate) + { + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + ele_count = 2; + goto done; + } + return 0; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 64 bytes. */ + size = ty->size; + if (size < 4 || size > 64) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. 
*/ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + case FFI_TYPE_LONGDOUBLE: + ele_count = size / sizeof(long double); + if (size != ele_count * sizeof(long double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return candidate * 4 + (4 - (int)ele_count); +} + +/* Representation of the procedure call argument marshalling + state. + + The terse state variable names match the names used in the AARCH64 + PCS. */ + +struct arg_state +{ + unsigned ngrn; /* Next general-purpose register number. */ + unsigned nsrn; /* Next vector register number. */ + size_t nsaa; /* Next stack offset. */ + +#if defined (__APPLE__) + unsigned allocating_variadic; +#endif +}; + +/* Initialize a procedure call argument marshalling state. */ +static void +arg_init (struct arg_state *state) +{ + state->ngrn = 0; + state->nsrn = 0; + state->nsaa = 0; +#if defined (__APPLE__) + state->allocating_variadic = 0; +#endif +} + +/* Allocate an aligned slot on the stack and return a pointer to it. */ +static void * +allocate_to_stack (struct arg_state *state, void *stack, + size_t alignment, size_t size) +{ + size_t nsaa = state->nsaa; + + /* Round up the NSAA to the larger of 8 or the natural + alignment of the argument's type. 
*/ +#if defined (__APPLE__) + if (state->allocating_variadic && alignment < 8) + alignment = 8; +#else + if (alignment < 8) + alignment = 8; +#endif + + nsaa = FFI_ALIGN (nsaa, alignment); + state->nsaa = nsaa + size; + + return (char *)stack + nsaa; +} + +static ffi_arg +extend_integer_type (void *source, int type) +{ + switch (type) + { + case FFI_TYPE_UINT8: + return *(UINT8 *) source; + case FFI_TYPE_SINT8: + return *(SINT8 *) source; + case FFI_TYPE_UINT16: + return *(UINT16 *) source; + case FFI_TYPE_SINT16: + return *(SINT16 *) source; + case FFI_TYPE_UINT32: + return *(UINT32 *) source; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + return *(SINT32 *) source; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + return *(UINT64 *) source; + break; + case FFI_TYPE_POINTER: + return *(uintptr_t *) source; + default: + abort(); + } +} + +static void +extend_hfa_type (void *dest, void *src, int h) +{ + ssize_t f = h - AARCH64_RET_S4; + void *x0; + + asm volatile ( + "adr %0, 0f\n" +" add %0, %0, %1\n" +" br %0\n" +"0: ldp s16, s17, [%3]\n" /* S4 */ +" ldp s18, s19, [%3, #8]\n" +" b 4f\n" +" ldp s16, s17, [%3]\n" /* S3 */ +" ldr s18, [%3, #8]\n" +" b 3f\n" +" ldp s16, s17, [%3]\n" /* S2 */ +" b 2f\n" +" nop\n" +" ldr s16, [%3]\n" /* S1 */ +" b 1f\n" +" nop\n" +" ldp d16, d17, [%3]\n" /* D4 */ +" ldp d18, d19, [%3, #16]\n" +" b 4f\n" +" ldp d16, d17, [%3]\n" /* D3 */ +" ldr d18, [%3, #16]\n" +" b 3f\n" +" ldp d16, d17, [%3]\n" /* D2 */ +" b 2f\n" +" nop\n" +" ldr d16, [%3]\n" /* D1 */ +" b 1f\n" +" nop\n" +" ldp q16, q17, [%3]\n" /* Q4 */ +" ldp q18, q19, [%3, #32]\n" +" b 4f\n" +" ldp q16, q17, [%3]\n" /* Q3 */ +" ldr q18, [%3, #32]\n" +" b 3f\n" +" ldp q16, q17, [%3]\n" /* Q2 */ +" b 2f\n" +" nop\n" +" ldr q16, [%3]\n" /* Q1 */ +" b 1f\n" +"4: str q19, [%2, #48]\n" +"3: str q18, [%2, #32]\n" +"2: str q17, [%2, #16]\n" +"1: str q16, [%2]" + : "=&r"(x0) + : "r"(f * 12), "r"(dest), "r"(src) + : "memory", "v16", "v17", "v18", "v19"); +} + +static void * 
+compress_hfa_type (void *dest, void *reg, int h) +{ + switch (h) + { + case AARCH64_RET_S1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 12; +#endif + } + else + *(float *)dest = *(float *)reg; + break; + case AARCH64_RET_S2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.s, v17.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_S3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.s, v17.s, v18.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_S4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.s, v17.s, v18.s, v19.s }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + case AARCH64_RET_D1: + if (dest == reg) + { +#ifdef __AARCH64EB__ + dest += 8; +#endif + } + else + *(double *)dest = *(double *)reg; + break; + case AARCH64_RET_D2: + asm ("ldp q16, q17, [%1]\n\t" + "st2 { v16.d, v17.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17"); + break; + case AARCH64_RET_D3: + asm ("ldp q16, q17, [%1]\n\t" + "ldr q18, [%1, #32]\n\t" + "st3 { v16.d, v17.d, v18.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18"); + break; + case AARCH64_RET_D4: + asm ("ldp q16, q17, [%1]\n\t" + "ldp q18, q19, [%1, #32]\n\t" + "st4 { v16.d, v17.d, v18.d, v19.d }[0], [%0]" + : : "r"(dest), "r"(reg) : "memory", "v16", "v17", "v18", "v19"); + break; + + default: + if (dest != reg) + return memcpy (dest, reg, 16 * (4 - (h & 3))); + break; + } + return dest; +} + +/* Either allocate an appropriate register for the argument type, or if + none are available, allocate a stack slot and return a pointer + to the allocated space. 
*/ + +static void * +allocate_int_to_reg_or_stack (struct call_context *context, + struct arg_state *state, + void *stack, size_t size) +{ + if (state->ngrn < N_X_ARG_REG) + return &context->x[state->ngrn++]; + + state->ngrn = N_X_ARG_REG; + return allocate_to_stack (state, stack, size, size); +} + +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + ffi_type *rtype = cif->rtype; + size_t bytes = cif->bytes; + int flags, i, n; + + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = AARCH64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = AARCH64_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = AARCH64_RET_UINT16; + break; + case FFI_TYPE_UINT32: + flags = AARCH64_RET_UINT32; + break; + case FFI_TYPE_SINT8: + flags = AARCH64_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = AARCH64_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = AARCH64_RET_SINT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = AARCH64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? AARCH64_RET_UINT32 : AARCH64_RET_INT64); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + flags = is_vfp_type (rtype); + if (flags == 0) + { + size_t s = rtype->size; + if (s > 16) + { + flags = AARCH64_RET_VOID | AARCH64_RET_IN_MEM; + bytes += 8; + } + else if (s == 16) + flags = AARCH64_RET_INT128; + else if (s == 8) + flags = AARCH64_RET_INT64; + else + flags = AARCH64_RET_INT128 | AARCH64_RET_NEED_COPY; + } + break; + + default: + abort(); + } + + for (i = 0, n = cif->nargs; i < n; i++) + if (is_vfp_type (cif->arg_types[i])) + { + flags |= AARCH64_FLAG_ARG_V; + break; + } + + /* Round the stack up to a multiple of the stack alignment requirement. 
*/ + cif->bytes = (unsigned) FFI_ALIGN(bytes, 16); + cif->flags = flags; +#if defined (__APPLE__) + cif->aarch64_nfixedargs = 0; +#endif + + return FFI_OK; +} + +#if defined (__APPLE__) +/* Perform Apple-specific cif processing for variadic calls */ +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, + unsigned int ntotalargs) +{ + ffi_status status = ffi_prep_cif_machdep (cif); + cif->aarch64_nfixedargs = nfixedargs; + return status; +} +#endif /* __APPLE__ */ + +extern void ffi_call_SYSV (struct call_context *context, void *frame, + void (*fn)(void), void *rvalue, int flags, + void *closure) FFI_HIDDEN; + +/* Call a function with the provided arguments and capture the return + value. */ +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *orig_rvalue, + void **avalue, void *closure) +{ + struct call_context *context; + void *stack, *frame, *rvalue; + struct arg_state state; + size_t stack_bytes, rtype_size, rsize; + int i, nargs, flags; + ffi_type *rtype; + + flags = cif->flags; + rtype = cif->rtype; + rtype_size = rtype->size; + stack_bytes = cif->bytes; + + /* If the target function returns a structure via hidden pointer, + then we cannot allow a null rvalue. Otherwise, mash a null + rvalue to void return type. */ + rsize = 0; + if (flags & AARCH64_RET_IN_MEM) + { + if (orig_rvalue == NULL) + rsize = rtype_size; + } + else if (orig_rvalue == NULL) + flags &= AARCH64_FLAG_ARG_V; + else if (flags & AARCH64_RET_NEED_COPY) + rsize = 16; + + /* Allocate consectutive stack for everything we'll need. */ + context = alloca (sizeof(struct call_context) + stack_bytes + 32 + rsize); + stack = context + 1; + frame = stack + stack_bytes; + rvalue = (rsize ? 
frame + 32 : orig_rvalue); + + arg_init (&state); + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + size_t s = ty->size; + void *a = avalue[i]; + int h, t; + + t = ty->type; + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + /* If the argument is a basic type the argument is allocated to an + appropriate register, or if none are available, to the stack. */ + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_pointer: + { + ffi_arg ext = extend_integer_type (a, t); + if (state.ngrn < N_X_ARG_REG) + context->x[state.ngrn++] = ext; + else + { + void *d = allocate_to_stack (&state, stack, ty->alignment, s); + state.ngrn = N_X_ARG_REG; + /* Note that the default abi extends each argument + to a full 64-bit slot, while the iOS abi allocates + only enough space. */ +#ifdef __APPLE__ + memcpy(d, a, s); +#else + *(ffi_arg *)d = ext; +#endif + } + } + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + { + void *dest; + + h = is_vfp_type (ty); + if (h) + { + int elems = 4 - (h & 3); + if (state.nsrn + elems <= N_V_ARG_REG) + { + dest = &context->v[state.nsrn]; + state.nsrn += elems; + extend_hfa_type (dest, a, h); + break; + } + state.nsrn = N_V_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); + } + else if (s > 16) + { + /* If the argument is a composite type that is larger than 16 + bytes, then the argument has been copied to memory, and + the argument is replaced by a pointer to the copy. 
*/ + a = &avalue[i]; + t = FFI_TYPE_POINTER; + s = sizeof (void *); + goto do_pointer; + } + else + { + size_t n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + /* If the argument is a composite type and the size in + double-words is not more than the number of available + X registers, then the argument is copied into + consecutive X registers. */ + dest = &context->x[state.ngrn]; + state.ngrn += n; + } + else + { + /* Otherwise, there are insufficient X registers. Further + X register allocations are prevented, the NSAA is + adjusted and the argument is copied to memory at the + adjusted NSAA. */ + state.ngrn = N_X_ARG_REG; + dest = allocate_to_stack (&state, stack, ty->alignment, s); + } + } + memcpy (dest, a, s); + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + ffi_call_SYSV (context, frame, fn, rvalue, flags, closure); + + if (flags & AARCH64_RET_NEED_COPY) + memcpy (orig_rvalue, rvalue, rtype_size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifdef FFI_GO_CLOSURES +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} +#endif /* FFI_GO_CLOSURES */ + +/* Build a trampoline. 
*/ + +extern void ffi_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_closure_loc (ffi_closure *closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + void (*start)(void); + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_closure_SYSV_V; + else + start = ffi_closure_SYSV; + +#if FFI_EXEC_TRAMPOLINE_TABLE +#ifdef __MACH__ + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = start; +#endif +#else + static const unsigned char trampoline[16] = { + 0x90, 0x00, 0x00, 0x58, /* ldr x16, tramp+16 */ + 0xf1, 0xff, 0xff, 0x10, /* adr x17, tramp+0 */ + 0x00, 0x02, 0x1f, 0xd6 /* br x16 */ + }; + char *tramp = closure->tramp; + + memcpy (tramp, trampoline, sizeof(trampoline)); + + *(UINT64 *)(tramp + 16) = (uintptr_t)start; + + ffi_clear_cache(tramp, tramp + FFI_TRAMPOLINE_SIZE); +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +#ifdef FFI_GO_CLOSURES +extern void ffi_go_closure_SYSV (void) FFI_HIDDEN; +extern void ffi_go_closure_SYSV_V (void) FFI_HIDDEN; + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*start)(void); + + if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + if (cif->flags & AARCH64_FLAG_ARG_V) + start = ffi_go_closure_SYSV_V; + else + start = ffi_go_closure_SYSV; + + closure->tramp = start; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} +#endif /* FFI_GO_CLOSURES */ + +/* Primary handler to setup and invoke a function within a closure. + + A closure when invoked enters via the assembler wrapper + ffi_closure_SYSV(). 
The wrapper allocates a call context on the + stack, saves the interesting registers (from the perspective of + the calling convention) into the context then passes control to + ffi_closure_SYSV_inner() passing the saved context and a pointer to + the stack at the point ffi_closure_SYSV() was invoked. + + On the return path the assembler wrapper will reload call context + registers. + + ffi_closure_SYSV_inner() marshalls the call context into ffi value + descriptors, invokes the wrapped function, then marshalls the return + value back into the call context. */ + +int FFI_HIDDEN +ffi_closure_SYSV_inner (ffi_cif *cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + struct call_context *context, + void *stack, void *rvalue, void *struct_rvalue) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + int i, h, nargs, flags; + struct arg_state state; + + arg_init (&state); + + for (i = 0, nargs = cif->nargs; i < nargs; i++) + { + ffi_type *ty = cif->arg_types[i]; + int t = ty->type; + size_t n, s = ty->size; + + switch (t) + { + case FFI_TYPE_VOID: + FFI_ASSERT (0); + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + avalue[i] = allocate_int_to_reg_or_stack (context, &state, stack, s); + break; + + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + h = is_vfp_type (ty); + if (h) + { + n = 4 - (h & 3); + if (state.nsrn + n <= N_V_ARG_REG) + { + void *reg = &context->v[state.nsrn]; + state.nsrn += n; + + /* Eeek! We need a pointer to the structure, however the + homogeneous float elements are being passed in individual + registers, therefore for float and double the structure + is not represented as a contiguous sequence of bytes in + our saved register context. 
We don't need the original + contents of the register storage, so we reformat the + structure into the same memory. */ + avalue[i] = compress_hfa_type (reg, reg, h); + } + else + { + state.nsrn = N_V_ARG_REG; + avalue[i] = allocate_to_stack (&state, stack, + ty->alignment, s); + } + } + else if (s > 16) + { + /* Replace Composite type of size greater than 16 with a + pointer. */ + avalue[i] = *(void **) + allocate_int_to_reg_or_stack (context, &state, stack, + sizeof (void *)); + } + else + { + n = (s + 7) / 8; + if (state.ngrn + n <= N_X_ARG_REG) + { + avalue[i] = &context->x[state.ngrn]; + state.ngrn += n; + } + else + { + state.ngrn = N_X_ARG_REG; + avalue[i] = allocate_to_stack (&state, stack, + ty->alignment, s); + } + } + break; + + default: + abort(); + } + +#if defined (__APPLE__) + if (i + 1 == cif->aarch64_nfixedargs) + { + state.ngrn = N_X_ARG_REG; + state.nsrn = N_V_ARG_REG; + state.allocating_variadic = 1; + } +#endif + } + + flags = cif->flags; + if (flags & AARCH64_RET_IN_MEM) + rvalue = struct_rvalue; + + fun (cif, rvalue, avalue, user_data); + + return flags; +} + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/aarch64/internal.h b/module/src/main/cpp/whale/src/libffi/aarch64/internal.h new file mode 100644 index 00000000..3a993f06 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/aarch64/internal.h @@ -0,0 +1,72 @@ +#if defined(__aarch64__) || defined(__arm64__) + +/* +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or 
substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#define AARCH64_RET_VOID 0 +#define AARCH64_RET_INT64 1 +#define AARCH64_RET_INT128 2 + +#define AARCH64_RET_UNUSED3 3 +#define AARCH64_RET_UNUSED4 4 +#define AARCH64_RET_UNUSED5 5 +#define AARCH64_RET_UNUSED6 6 +#define AARCH64_RET_UNUSED7 7 + +/* Note that FFI_TYPE_FLOAT == 2, _DOUBLE == 3, _LONGDOUBLE == 4, + so _S4 through _Q1 are layed out as (TYPE * 4) + (4 - COUNT). */ +#define AARCH64_RET_S4 8 +#define AARCH64_RET_S3 9 +#define AARCH64_RET_S2 10 +#define AARCH64_RET_S1 11 + +#define AARCH64_RET_D4 12 +#define AARCH64_RET_D3 13 +#define AARCH64_RET_D2 14 +#define AARCH64_RET_D1 15 + +#define AARCH64_RET_Q4 16 +#define AARCH64_RET_Q3 17 +#define AARCH64_RET_Q2 18 +#define AARCH64_RET_Q1 19 + +/* Note that each of the sub-64-bit integers gets two entries. 
*/ +#define AARCH64_RET_UINT8 20 +#define AARCH64_RET_UINT16 22 +#define AARCH64_RET_UINT32 24 + +#define AARCH64_RET_SINT8 26 +#define AARCH64_RET_SINT16 28 +#define AARCH64_RET_SINT32 30 + +#define AARCH64_RET_MASK 31 + +#define AARCH64_RET_IN_MEM (1 << 5) +#define AARCH64_RET_NEED_COPY (1 << 6) + +#define AARCH64_FLAG_ARG_V_BIT 7 +#define AARCH64_FLAG_ARG_V (1 << AARCH64_FLAG_ARG_V_BIT) + +#define N_X_ARG_REG 8 +#define N_V_ARG_REG 8 +#define CALL_CONTEXT_SIZE (N_V_ARG_REG * 16 + N_X_ARG_REG * 8) + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/aarch64/sysv_arm64.S b/module/src/main/cpp/whale/src/libffi/aarch64/sysv_arm64.S new file mode 100644 index 00000000..84c925c3 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/aarch64/sysv_arm64.S @@ -0,0 +1,441 @@ +#if defined(__aarch64__) + +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +#ifdef HAVE_MACHINE_ASM_H +#include +#else +#ifdef __USER_LABEL_PREFIX__ +#define CONCAT1(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +/* Use the right prefix for global labels. */ +#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x) +#else +#define CNAME(x) x +#endif +#endif + +#ifdef __AARCH64EB__ +# define BE(X) X +#else +# define BE(X) 0 +#endif + +#ifdef __ILP32__ +#define PTR_REG(n) w##n +#else +#define PTR_REG(n) x##n +#endif + +#ifdef __ILP32__ +#define PTR_SIZE 4 +#else +#define PTR_SIZE 8 +#endif + + .text + .align 4 + +/* ffi_call_SYSV + extern void ffi_call_SYSV (void *stack, void *frame, + void (*fn)(void), void *rvalue, + int flags, void *closure); + + Therefore on entry we have: + + x0 stack + x1 frame + x2 fn + x3 rvalue + x4 flags + x5 closure +*/ + + cfi_startproc +CNAME(ffi_call_SYSV): + /* Use a stack frame allocated by our caller. */ + cfi_def_cfa(x1, 32); + stp x29, x30, [x1] + mov x29, x1 + mov sp, x0 + cfi_def_cfa_register(x29) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + mov x9, x2 /* save fn */ + mov x8, x3 /* install structure return */ +#ifdef FFI_GO_CLOSURES + mov x18, x5 /* install static chain */ +#endif + stp x3, x4, [x29, #16] /* save rvalue and flags */ + + /* Load the vector argument passing registers, if necessary. */ + tbz w4, #AARCH64_FLAG_ARG_V_BIT, 1f + ldp q0, q1, [sp, #0] + ldp q2, q3, [sp, #32] + ldp q4, q5, [sp, #64] + ldp q6, q7, [sp, #96] +1: + /* Load the core argument passing registers, including + the structure return pointer. */ + ldp x0, x1, [sp, #16*N_V_ARG_REG + 0] + ldp x2, x3, [sp, #16*N_V_ARG_REG + 16] + ldp x4, x5, [sp, #16*N_V_ARG_REG + 32] + ldp x6, x7, [sp, #16*N_V_ARG_REG + 48] + + /* Deallocate the context, leaving the stacked arguments. */ + add sp, sp, #CALL_CONTEXT_SIZE + + blr x9 /* call fn */ + + ldp x3, x4, [x29, #16] /* reload rvalue and flags */ + + /* Partially deconstruct the stack frame. 
*/ + mov sp, x29 + cfi_def_cfa_register (sp) + ldp x29, x30, [x29] + + /* Save the return value as directed. */ + adr x5, 0f + and w4, w4, #AARCH64_RET_MASK + add x5, x5, x4, lsl #3 + br x5 + + /* Note that each table entry is 2 insns, and thus 8 bytes. + For integer data, note that we're storing into ffi_arg + and therefore we want to extend to 64 bits; these types + have two consecutive entries allocated for them. */ + .align 4 +0: ret /* VOID */ + nop +1: str x0, [x3] /* INT64 */ + ret +2: stp x0, x1, [x3] /* INT128 */ + ret +3: brk #1000 /* UNUSED */ + ret +4: brk #1000 /* UNUSED */ + ret +5: brk #1000 /* UNUSED */ + ret +6: brk #1000 /* UNUSED */ + ret +7: brk #1000 /* UNUSED */ + ret +8: st4 { v0.s, v1.s, v2.s, v3.s }[0], [x3] /* S4 */ + ret +9: st3 { v0.s, v1.s, v2.s }[0], [x3] /* S3 */ + ret +10: stp s0, s1, [x3] /* S2 */ + ret +11: str s0, [x3] /* S1 */ + ret +12: st4 { v0.d, v1.d, v2.d, v3.d }[0], [x3] /* D4 */ + ret +13: st3 { v0.d, v1.d, v2.d }[0], [x3] /* D3 */ + ret +14: stp d0, d1, [x3] /* D2 */ + ret +15: str d0, [x3] /* D1 */ + ret +16: str q3, [x3, #48] /* Q4 */ + nop +17: str q2, [x3, #32] /* Q3 */ + nop +18: stp q0, q1, [x3] /* Q2 */ + ret +19: str q0, [x3] /* Q1 */ + ret +20: uxtb w0, w0 /* UINT8 */ + str x0, [x3] +21: ret /* reserved */ + nop +22: uxth w0, w0 /* UINT16 */ + str x0, [x3] +23: ret /* reserved */ + nop +24: mov w0, w0 /* UINT32 */ + str x0, [x3] +25: ret /* reserved */ + nop +26: sxtb x0, w0 /* SINT8 */ + str x0, [x3] +27: ret /* reserved */ + nop +28: sxth x0, w0 /* SINT16 */ + str x0, [x3] +29: ret /* reserved */ + nop +30: sxtw x0, w0 /* SINT32 */ + str x0, [x3] +31: ret /* reserved */ + nop + + cfi_endproc + + .globl CNAME(ffi_call_SYSV) +#ifdef __ELF__ + .type CNAME(ffi_call_SYSV), #function + .hidden CNAME(ffi_call_SYSV) + .size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV) +#endif + +/* ffi_closure_SYSV + + Closure invocation glue. 
This is the low level code invoked directly by + the closure trampoline to setup and call a closure. + + On entry x17 points to a struct ffi_closure, x16 has been clobbered + all other registers are preserved. + + We allocate a call context and save the argument passing registers, + then invoked the generic C ffi_closure_SYSV_inner() function to do all + the real work, on return we load the result passing registers back from + the call context. +*/ + +#define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64) + + .align 4 +CNAME(ffi_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_closure_SYSV_V) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV_V), #function + .hidden CNAME(ffi_closure_SYSV_V) + .size CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. */ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. 
*/ + ldp PTR_REG(0), PTR_REG(1), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET] /* load cif, fn */ + ldr PTR_REG(2), [x17, #FFI_TRAMPOLINE_CLOSURE_OFFSET+PTR_SIZE*2] /* load user_data */ +.Ldo_closure: + add x3, sp, #16 /* load context */ + add x4, sp, #ffi_closure_SYSV_FS /* load stack */ + add x5, sp, #16+CALL_CONTEXT_SIZE /* load rvalue */ + mov x6, x8 /* load struct_rval */ + bl CNAME(ffi_closure_SYSV_inner) + + /* Load the return value as directed. */ + adr x1, 0f + and w0, w0, #AARCH64_RET_MASK + add x1, x1, x0, lsl #3 + add x3, sp, #16+CALL_CONTEXT_SIZE + br x1 + + /* Note that each table entry is 2 insns, and thus 8 bytes. */ + .align 4 +0: b 99f /* VOID */ + nop +1: ldr x0, [x3] /* INT64 */ + b 99f +2: ldp x0, x1, [x3] /* INT128 */ + b 99f +3: brk #1000 /* UNUSED */ + nop +4: brk #1000 /* UNUSED */ + nop +5: brk #1000 /* UNUSED */ + nop +6: brk #1000 /* UNUSED */ + nop +7: brk #1000 /* UNUSED */ + nop +8: ldr s3, [x3, #12] /* S4 */ + nop +9: ldr s2, [x3, #8] /* S3 */ + nop +10: ldp s0, s1, [x3] /* S2 */ + b 99f +11: ldr s0, [x3] /* S1 */ + b 99f +12: ldr d3, [x3, #24] /* D4 */ + nop +13: ldr d2, [x3, #16] /* D3 */ + nop +14: ldp d0, d1, [x3] /* D2 */ + b 99f +15: ldr d0, [x3] /* D1 */ + b 99f +16: ldr q3, [x3, #48] /* Q4 */ + nop +17: ldr q2, [x3, #32] /* Q3 */ + nop +18: ldp q0, q1, [x3] /* Q2 */ + b 99f +19: ldr q0, [x3] /* Q1 */ + b 99f +20: ldrb w0, [x3, #BE(7)] /* UINT8 */ + b 99f +21: brk #1000 /* reserved */ + nop +22: ldrh w0, [x3, #BE(6)] /* UINT16 */ + b 99f +23: brk #1000 /* reserved */ + nop +24: ldr w0, [x3, #BE(4)] /* UINT32 */ + b 99f +25: brk #1000 /* reserved */ + nop +26: ldrsb x0, [x3, #BE(7)] /* SINT8 */ + b 99f +27: brk #1000 /* reserved */ + nop +28: ldrsh x0, [x3, #BE(6)] /* SINT16 */ + b 99f +29: brk #1000 /* reserved */ + nop +30: ldrsw x0, [x3, #BE(4)] /* SINT32 */ + nop +31: /* reserved */ +99: ldp x29, x30, [sp], #ffi_closure_SYSV_FS + cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS) + cfi_restore (x29) + cfi_restore (x30) + ret + 
cfi_endproc + + .globl CNAME(ffi_closure_SYSV) +#ifdef __ELF__ + .type CNAME(ffi_closure_SYSV), #function + .hidden CNAME(ffi_closure_SYSV) + .size CNAME(ffi_closure_SYSV), . - CNAME(ffi_closure_SYSV) +#endif + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + .align PAGE_MAX_SHIFT +CNAME(ffi_closure_trampoline_table_page): + .rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr x16, -PAGE_MAX_SIZE + ldp x17, x16, [x16] + br x16 + nop /* each entry in the trampoline config page is 2*sizeof(void*) so the trampoline itself cannot be smaller that 16 bytes */ + .endr + + .globl CNAME(ffi_closure_trampoline_table_page) + #ifdef __ELF__ + .type CNAME(ffi_closure_trampoline_table_page), #function + .hidden CNAME(ffi_closure_trampoline_table_page) + .size CNAME(ffi_closure_trampoline_table_page), . - CNAME(ffi_closure_trampoline_table_page) + #endif +#endif + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ + +#ifdef FFI_GO_CLOSURES + .align 4 +CNAME(ffi_go_closure_SYSV_V): + cfi_startproc + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + /* Save the argument passing vector registers. */ + stp q0, q1, [sp, #16 + 0] + stp q2, q3, [sp, #16 + 32] + stp q4, q5, [sp, #16 + 64] + stp q6, q7, [sp, #16 + 96] + b 0f + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV_V) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV_V), #function + .hidden CNAME(ffi_go_closure_SYSV_V) + .size CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V) +#endif + + .align 4 + cfi_startproc +CNAME(ffi_go_closure_SYSV): + stp x29, x30, [sp, #-ffi_closure_SYSV_FS]! + cfi_adjust_cfa_offset (ffi_closure_SYSV_FS) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) +0: + mov x29, sp + + /* Save the argument passing core registers. 
*/ + stp x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0] + stp x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16] + stp x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32] + stp x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48] + + /* Load ffi_closure_inner arguments. */ + ldp PTR_REG(0), PTR_REG(1), [x18, #PTR_SIZE]/* load cif, fn */ + mov x2, x18 /* load user_data */ + b .Ldo_closure + cfi_endproc + + .globl CNAME(ffi_go_closure_SYSV) +#ifdef __ELF__ + .type CNAME(ffi_go_closure_SYSV), #function + .hidden CNAME(ffi_go_closure_SYSV) + .size CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV) +#endif +#endif /* FFI_GO_CLOSURES */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif + +#endif diff --git a/module/src/main/cpp/whale/src/libffi/arm/ffi_armv7.c b/module/src/main/cpp/whale/src/libffi/arm/ffi_armv7.c new file mode 100644 index 00000000..c14db868 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/arm/ffi_armv7.c @@ -0,0 +1,824 @@ +#ifdef __arm__ + +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2011 Timothy Wall + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + Copyright (c) 2011 Anthony Green + Copyright (c) 2011 Free Software Foundation + Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include "internal.h" + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include +#endif + +#else +extern unsigned int ffi_arm_trampoline[2] FFI_HIDDEN; +#endif + +/* Forward declares. */ +static int vfp_type_p (const ffi_type *); +static void layout_vfp_args (ffi_cif *); + +static void * +ffi_align (ffi_type *ty, void *p) +{ + /* Align if necessary */ + size_t alignment; +#ifdef _WIN32_WCE + alignment = 4; +#else + alignment = ty->alignment; + if (alignment < 4) + alignment = 4; +#endif + return (void *) FFI_ALIGN (p, alignment); +} + +static size_t +ffi_put_arg (ffi_type *ty, void *src, void *dst) +{ + size_t z = ty->size; + + switch (ty->type) + { + case FFI_TYPE_SINT8: + *(UINT32 *)dst = *(SINT8 *)src; + break; + case FFI_TYPE_UINT8: + *(UINT32 *)dst = *(UINT8 *)src; + break; + case FFI_TYPE_SINT16: + *(UINT32 *)dst = *(SINT16 *)src; + break; + case FFI_TYPE_UINT16: + *(UINT32 *)dst = *(UINT16 *)src; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + *(UINT32 *)dst = *(UINT32 *)src; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + case FFI_TYPE_DOUBLE: + *(UINT64 *)dst = *(UINT64 *)src; + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + memcpy (dst, src, z); + break; + + default: + abort(); + } + + return FFI_ALIGN (z, 4); +} + +/* ffi_prep_args is called 
once stack space has been allocated + for the function's arguments. + + The vfp_space parameter is the load area for VFP regs, the return + value is cif->vfp_used (word bitset of VFP regs used for passing + arguments). These are only used for the VFP hard-float ABI. +*/ +static void +ffi_prep_args_SYSV (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *argp) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (flags == ARM_TYPE_STRUCT) + { + *(void **) argp = rvalue; + argp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, avalue[i], argp); + } +} + +static void +ffi_prep_args_VFP (ffi_cif *cif, int flags, void *rvalue, + void **avalue, char *stack, char *vfp_space) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char stack_used = 0; + char done_with_regs = 0; + + /* The first 4 words on the stack are used for values + passed in core registers. */ + regp = stack; + eo_regp = argp = regp + 16; + + /* If the function returns an FFI_TYPE_STRUCT in memory, + that address is passed in r0 to the function. */ + if (flags == ARM_TYPE_STRUCT) + { + *(void **) regp = rvalue; + regp += 4; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *a = avalue[i]; + int is_vfp_type = vfp_type_p (ty); + + /* Allocated in VFP registers. */ + if (vi < cif->vfp_nargs && is_vfp_type) + { + char *vfp_slot = vfp_space + cif->vfp_args[vi++] * 4; + ffi_put_arg (ty, a, vfp_slot); + continue; + } + /* Try allocating in core registers. */ + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + size_t size = ty->size; + size = (size < 4) ? 4 : size; // pad + /* Check if there is space left in the aligned register + area to place the argument. 
*/ + if (tregp + size <= eo_regp) + { + regp = tregp + ffi_put_arg (ty, a, tregp); + done_with_regs = (regp == argp); + // ensure we did not write into the stack area + FFI_ASSERT (regp <= argp); + continue; + } + /* In case there are no arguments in the stack area yet, + the argument is passed in the remaining core registers + and on the stack. */ + else if (!stack_used) + { + stack_used = 1; + done_with_regs = 1; + argp = tregp + ffi_put_arg (ty, a, tregp); + FFI_ASSERT (eo_regp < argp); + continue; + } + } + /* Base case, arguments are passed on the stack */ + stack_used = 1; + argp = ffi_align (ty, argp); + argp += ffi_put_arg (ty, a, argp); + } +} + +/* Perform machine dependent cif processing */ +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int flags = 0, cabi = cif->abi; + size_t bytes = cif->bytes; + + /* Map out the register placements of VFP register args. The VFP + hard-float calling conventions are slightly more sophisticated + than the base calling conventions, so we do it here instead of + in ffi_prep_args(). */ + if (cabi == FFI_VFP) + layout_vfp_args (cif); + + /* Set the return type flag */ + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = ARM_TYPE_VOID; + break; + + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_POINTER: + flags = ARM_TYPE_INT; + break; + + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = ARM_TYPE_INT64; + break; + + case FFI_TYPE_FLOAT: + flags = (cabi == FFI_VFP ? ARM_TYPE_VFP_S : ARM_TYPE_INT); + break; + case FFI_TYPE_DOUBLE: + flags = (cabi == FFI_VFP ? 
ARM_TYPE_VFP_D : ARM_TYPE_INT64); + break; + + case FFI_TYPE_STRUCT: + case FFI_TYPE_COMPLEX: + if (cabi == FFI_VFP) + { + int h = vfp_type_p (cif->rtype); + + flags = ARM_TYPE_VFP_N; + if (h == 0x100 + FFI_TYPE_FLOAT) + flags = ARM_TYPE_VFP_S; + if (h == 0x100 + FFI_TYPE_DOUBLE) + flags = ARM_TYPE_VFP_D; + if (h != 0) + break; + } + + /* A Composite Type not larger than 4 bytes is returned in r0. + A Composite Type larger than 4 bytes, or whose size cannot + be determined statically ... is stored in memory at an + address passed [in r0]. */ + if (cif->rtype->size <= 4) + flags = ARM_TYPE_INT; + else + { + flags = ARM_TYPE_STRUCT; + bytes += 4; + } + break; + + default: + abort(); + } + + /* Round the stack up to a multiple of 8 bytes. This isn't needed + everywhere, but it is on some platforms, and it doesn't harm anything + when it isn't needed. */ + bytes = FFI_ALIGN (bytes, 8); + + /* Minimum stack space is the 4 register arguments that we pop. */ + if (bytes < 4*4) + bytes = 4*4; + + cif->bytes = bytes; + cif->flags = flags; + + return FFI_OK; +} + +/* Perform machine dependent cif processing for variadic calls */ +ffi_status +ffi_prep_cif_machdep_var (ffi_cif * cif, + unsigned int nfixedargs, unsigned int ntotalargs) +{ + /* VFP variadic calls actually use the SYSV ABI */ + if (cif->abi == FFI_VFP) + cif->abi = FFI_SYSV; + + return ffi_prep_cif_machdep (cif); +} + +/* Prototypes for assembly functions, in sysv.S. 
*/ + +struct call_frame +{ + void *fp; + void *lr; + void *rvalue; + int flags; + void *closure; +}; + +extern void ffi_call_SYSV (void *stack, struct call_frame *, + void (*fn) (void)) FFI_HIDDEN; +extern void ffi_call_VFP (void *vfp_space, struct call_frame *, + void (*fn) (void), unsigned vfp_used) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif * cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + int flags = cif->flags; + ffi_type *rtype = cif->rtype; + size_t bytes, rsize, vfp_size; + char *stack, *vfp_space, *new_rvalue; + struct call_frame *frame; + + rsize = 0; + if (rvalue == NULL) + { + /* If the return value is a struct and we don't have a return + value address then we need to make one. Otherwise the return + value is in registers and we can ignore them. */ + if (flags == ARM_TYPE_STRUCT) + rsize = rtype->size; + else + flags = ARM_TYPE_VOID; + } + else if (flags == ARM_TYPE_VFP_N) + { + /* Largest case is double x 4. */ + rsize = 32; + } + else if (flags == ARM_TYPE_INT && rtype->type == FFI_TYPE_STRUCT) + rsize = 4; + + /* Largest case. */ + vfp_size = (cif->abi == FFI_VFP && cif->vfp_used ? 
8*8: 0); + + bytes = cif->bytes; + stack = alloca (vfp_size + bytes + sizeof(struct call_frame) + rsize); + + vfp_space = NULL; + if (vfp_size) + { + vfp_space = stack; + stack += vfp_size; + } + + frame = (struct call_frame *)(stack + bytes); + + new_rvalue = rvalue; + if (rsize) + new_rvalue = (void *)(frame + 1); + + frame->rvalue = new_rvalue; + frame->flags = flags; + frame->closure = closure; + + if (vfp_space) + { + ffi_prep_args_VFP (cif, flags, new_rvalue, avalue, stack, vfp_space); + ffi_call_VFP (vfp_space, frame, fn, cif->vfp_used); + } + else + { + ffi_prep_args_SYSV (cif, flags, new_rvalue, avalue, stack); + ffi_call_SYSV (stack, frame, fn); + } + + if (rvalue && rvalue != new_rvalue) + memcpy (rvalue, new_rvalue, rtype->size); +} + +void +ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +static void * +ffi_prep_incoming_args_SYSV (ffi_cif *cif, void *rvalue, + char *argp, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) argp; + argp += 4; + } + else + { + if (cif->rtype->size && cif->rtype->size < 4) + *(uint32_t *) rvalue = 0; + } + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +static void * +ffi_prep_incoming_args_VFP (ffi_cif *cif, void *rvalue, char *stack, + char *vfp_space, void **avalue) +{ + ffi_type **arg_types = cif->arg_types; + int i, n, vi = 0; + char *argp, *regp, *eo_regp; + char done_with_regs = 0; + char stack_used = 0; + + regp = stack; + eo_regp = argp = regp + 16; + + if (cif->flags == ARM_TYPE_STRUCT) + { + rvalue = *(void **) regp; + regp += 4; + } + + for 
(i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + int is_vfp_type = vfp_type_p (ty); + size_t z = ty->size; + + if (vi < cif->vfp_nargs && is_vfp_type) + { + avalue[i] = vfp_space + cif->vfp_args[vi++] * 4; + continue; + } + else if (!done_with_regs && !is_vfp_type) + { + char *tregp = ffi_align (ty, regp); + + z = (z < 4) ? 4 : z; // pad + + /* If the arguments either fits into the registers or uses registers + and stack, while we haven't read other things from the stack */ + if (tregp + z <= eo_regp || !stack_used) + { + /* Because we're little endian, this is what it turns into. */ + avalue[i] = (void *) tregp; + regp = tregp + z; + + /* If we read past the last core register, make sure we + have not read from the stack before and continue + reading after regp. */ + if (regp > eo_regp) + { + FFI_ASSERT (!stack_used); + argp = regp; + } + if (regp >= eo_regp) + { + done_with_regs = 1; + stack_used = 1; + } + continue; + } + } + + stack_used = 1; + argp = ffi_align (ty, argp); + avalue[i] = (void *) argp; + argp += z; + } + + return rvalue; +} + +struct closure_frame +{ + char vfp_space[8*8] __attribute__((aligned(8))); + char result[8*4]; + char argp[]; +}; + +int FFI_HIDDEN +ffi_closure_inner_SYSV (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_SYSV (cif, frame->result, + frame->argp, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +int FFI_HIDDEN +ffi_closure_inner_VFP (ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, + struct closure_frame *frame) +{ + void **avalue = (void **) alloca (cif->nargs * sizeof (void *)); + void *rvalue = ffi_prep_incoming_args_VFP (cif, frame->result, frame->argp, + frame->vfp_space, avalue); + fun (cif, rvalue, avalue, user_data); + return cif->flags; +} + +void 
ffi_closure_SYSV (void) FFI_HIDDEN; +void ffi_closure_VFP (void) FFI_HIDDEN; +void ffi_go_closure_SYSV (void) FFI_HIDDEN; +void ffi_go_closure_VFP (void) FFI_HIDDEN; + +/* the cif must already be prep'ed */ + +ffi_status +ffi_prep_closure_loc (ffi_closure * closure, + ffi_cif * cif, + void (*fun) (ffi_cif *, void *, void **, void *), + void *user_data, void *codeloc) +{ + void (*closure_func) (void) = ffi_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. */ + if (cif->vfp_used) + closure_func = ffi_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + +#if FFI_EXEC_TRAMPOLINE_TABLE + void **config = (void **)((uint8_t *)codeloc - PAGE_MAX_SIZE); + config[0] = closure; + config[1] = closure_func; +#else + memcpy (closure->tramp, ffi_arm_trampoline, 8); +#if defined (__QNX__) + msync(closure->tramp, 8, 0x1000000); /* clear data map */ + msync(codeloc, 8, 0x1000000); /* clear insn map */ +#else + __clear_cache(closure->tramp, closure->tramp + 8); /* clear data map */ + __clear_cache(codeloc, codeloc + 8); /* clear insn map */ +#endif + *(void (**)(void))(closure->tramp + 8) = closure_func; +#endif + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, + void (*fun) (ffi_cif *, void *, void **, void *)) +{ + void (*closure_func) (void) = ffi_go_closure_SYSV; + + if (cif->abi == FFI_VFP) + { + /* We only need take the vfp path if there are vfp arguments. */ + if (cif->vfp_used) + closure_func = ffi_go_closure_VFP; + } + else if (cif->abi != FFI_SYSV) + return FFI_BAD_ABI; + + closure->tramp = closure_func; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* Below are routines for VFP hard-float support. */ + +/* A subroutine of vfp_type_p. Given a structure type, return the type code + of the first non-structure element. 
Recurse for structure elements. + Return -1 if the structure is in fact empty, i.e. no nested elements. */ + +static int +is_hfa0 (const ffi_type *ty) +{ + ffi_type **elements = ty->elements; + int i, ret = -1; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + ret = elements[i]->type; + if (ret == FFI_TYPE_STRUCT || ret == FFI_TYPE_COMPLEX) + { + ret = is_hfa0 (elements[i]); + if (ret < 0) + continue; + } + break; + } + + return ret; +} + +/* A subroutine of vfp_type_p. Given a structure type, return true if all + of the non-structure elements are the same as CANDIDATE. */ + +static int +is_hfa1 (const ffi_type *ty, int candidate) +{ + ffi_type **elements = ty->elements; + int i; + + if (elements != NULL) + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + return 1; +} + +/* Determine if TY is an homogenous floating point aggregate (HFA). + That is, a structure consisting of 1 to 4 members of all the same type, + where that type is a floating point scalar. + + Returns non-zero iff TY is an HFA. The result is an encoded value where + bits 0-7 contain the type code, and bits 8-10 contain the element count. */ + +static int +vfp_type_p (const ffi_type *ty) +{ + ffi_type **elements; + int candidate, i; + size_t size, ele_count; + + /* Quickest tests first. */ + candidate = ty->type; + switch (ty->type) + { + default: + return 0; + case FFI_TYPE_FLOAT: + case FFI_TYPE_DOUBLE: + ele_count = 1; + goto done; + case FFI_TYPE_COMPLEX: + candidate = ty->elements[0]->type; + if (candidate != FFI_TYPE_FLOAT && candidate != FFI_TYPE_DOUBLE) + return 0; + ele_count = 2; + goto done; + case FFI_TYPE_STRUCT: + break; + } + + /* No HFA types are smaller than 4 bytes, or larger than 32 bytes. 
*/ + size = ty->size; + if (size < 4 || size > 32) + return 0; + + /* Find the type of the first non-structure member. */ + elements = ty->elements; + candidate = elements[0]->type; + if (candidate == FFI_TYPE_STRUCT || candidate == FFI_TYPE_COMPLEX) + { + for (i = 0; ; ++i) + { + candidate = is_hfa0 (elements[i]); + if (candidate >= 0) + break; + } + } + + /* If the first member is not a floating point type, it's not an HFA. + Also quickly re-check the size of the structure. */ + switch (candidate) + { + case FFI_TYPE_FLOAT: + ele_count = size / sizeof(float); + if (size != ele_count * sizeof(float)) + return 0; + break; + case FFI_TYPE_DOUBLE: + ele_count = size / sizeof(double); + if (size != ele_count * sizeof(double)) + return 0; + break; + default: + return 0; + } + if (ele_count > 4) + return 0; + + /* Finally, make sure that all scalar elements are the same type. */ + for (i = 0; elements[i]; ++i) + { + int t = elements[i]->type; + if (t == FFI_TYPE_STRUCT || t == FFI_TYPE_COMPLEX) + { + if (!is_hfa1 (elements[i], candidate)) + return 0; + } + else if (t != candidate) + return 0; + } + + /* All tests succeeded. Encode the result. */ + done: + return (ele_count << 8) | candidate; +} + +static int +place_vfp_arg (ffi_cif *cif, int h) +{ + unsigned short reg = cif->vfp_reg_free; + int align = 1, nregs = h >> 8; + + if ((h & 0xff) == FFI_TYPE_DOUBLE) + align = 2, nregs *= 2; + + /* Align register number. */ + if ((reg & 1) && align == 2) + reg++; + + while (reg + nregs <= 16) + { + int s, new_used = 0; + for (s = reg; s < reg + nregs; s++) + { + new_used |= (1 << s); + if (cif->vfp_used & (1 << s)) + { + reg += align; + goto next_reg; + } + } + /* Found regs to allocate. */ + cif->vfp_used |= new_used; + cif->vfp_args[cif->vfp_nargs++] = reg; + + /* Update vfp_reg_free. 
*/ + if (cif->vfp_used & (1 << cif->vfp_reg_free)) + { + reg += nregs; + while (cif->vfp_used & (1 << reg)) + reg += 1; + cif->vfp_reg_free = reg; + } + return 0; + next_reg:; + } + // done, mark all regs as used + cif->vfp_reg_free = 16; + cif->vfp_used = 0xFFFF; + return 1; +} + +static void +layout_vfp_args (ffi_cif * cif) +{ + int i; + /* Init VFP fields */ + cif->vfp_used = 0; + cif->vfp_nargs = 0; + cif->vfp_reg_free = 0; + memset (cif->vfp_args, -1, 16); /* Init to -1. */ + + for (i = 0; i < cif->nargs; i++) + { + int h = vfp_type_p (cif->arg_types[i]); + if (h && place_vfp_arg (cif, h) == 1) + break; + } +} + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/arm/internal.h b/module/src/main/cpp/whale/src/libffi/arm/internal.h new file mode 100644 index 00000000..dc4ea2cc --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/arm/internal.h @@ -0,0 +1,12 @@ +#ifdef __arm__ + +#define ARM_TYPE_VFP_S 0 +#define ARM_TYPE_VFP_D 1 +#define ARM_TYPE_VFP_N 2 +#define ARM_TYPE_INT64 3 +#define ARM_TYPE_INT 4 +#define ARM_TYPE_VOID 5 +#define ARM_TYPE_STRUCT 6 + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/arm/sysv_armv7.S b/module/src/main/cpp/whale/src/libffi/arm/sysv_armv7.S new file mode 100644 index 00000000..741f205d --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/arm/sysv_armv7.S @@ -0,0 +1,388 @@ +#ifdef __arm__ + +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc. + Copyright (c) 2011 Plausible Labs Cooperative, Inc. 
+ + ARM Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#define LIBFFI_ASM +#include +#include +#include +#include "internal.h" + +/* GCC 4.8 provides __ARM_ARCH; construct it otherwise. 
*/ +#ifndef __ARM_ARCH +# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \ + || defined(__ARM_ARCH_6M__) +# define __ARM_ARCH 6 +# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \ + || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# else +# define __ARM_ARCH 4 +# endif +#endif + +/* Conditionally compile unwinder directives. */ +#ifdef __ARM_EABI__ +# define UNWIND(...) __VA_ARGS__ +#else +# define UNWIND(...) +#endif + +#if defined(HAVE_AS_CFI_PSEUDO_OP) && defined(__ARM_EABI__) + .cfi_sections .debug_frame +#endif + +#define CONCAT(a, b) CONCAT2(a, b) +#define CONCAT2(a, b) a ## b + +#ifdef __USER_LABEL_PREFIX__ +# define CNAME(X) CONCAT (__USER_LABEL_PREFIX__, X) +#else +# define CNAME(X) X +#endif +#ifdef __ELF__ +# define SIZE(X) .size CNAME(X), . - CNAME(X) +# define TYPE(X, Y) .type CNAME(X), Y +#else +# define SIZE(X) +# define TYPE(X, Y) +#endif + +#define ARM_FUNC_START_LOCAL(name) \ + .align 3; \ + TYPE(CNAME(name), %function); \ + CNAME(name): + +#define ARM_FUNC_START(name) \ + .globl CNAME(name); \ + FFI_HIDDEN(CNAME(name)); \ + ARM_FUNC_START_LOCAL(name) + +#define ARM_FUNC_END(name) \ + SIZE(name) + +/* Aid in defining a jump table with 8 bytes between entries. */ +/* ??? The clang assembler doesn't handle .if with symbolic expressions. */ +#ifdef __clang__ +# define E(index) +#else +# define E(index) \ + .if . - 0b - 8*index; \ + .error "type table out of sync"; \ + .endif +#endif + + .text + .syntax unified + .arm + +#ifndef __clang__ + /* We require interworking on LDM, which implies ARMv5T, + which implies the existance of BLX. 
*/ + .arch armv5t +#endif + + /* Note that we use STC and LDC to encode VFP instructions, + so that we do not need ".fpu vfp", nor get that added to + the object file attributes. These will not be executed + unless the FFI_VFP abi is used. */ + + @ r0: stack + @ r1: frame + @ r2: fn + @ r3: vfp_used + +ARM_FUNC_START(ffi_call_VFP) + UNWIND(.fnstart) + cfi_startproc + + cmp r3, #3 @ load only d0 if possible +#ifdef __clang__ + vldrle d0, [sp] + vldmgt sp, {d0-d7} +#else + ldcle p11, cr0, [r0] @ vldrle d0, [sp] + ldcgt p11, cr0, [r0], {16} @ vldmgt sp, {d0-d7} +#endif + add r0, r0, #64 @ discard the vfp register args + /* FALLTHRU */ +ARM_FUNC_END(ffi_call_VFP) + +ARM_FUNC_START(ffi_call_SYSV) + stm r1, {fp, lr} + mov fp, r1 + + @ This is a bit of a lie wrt the origin of the unwind info, but + @ now we've got the usual frame pointer and two saved registers. + UNWIND(.save {fp,lr}) + UNWIND(.setfp fp, sp) + cfi_def_cfa(fp, 8) + cfi_rel_offset(fp, 0) + cfi_rel_offset(lr, 4) + + mov sp, r0 @ install the stack pointer + mov lr, r2 @ move the fn pointer out of the way + ldr ip, [fp, #16] @ install the static chain + ldmia sp!, {r0-r3} @ move first 4 parameters in registers. + blx lr @ call fn + + @ Load r2 with the pointer to storage for the return value + @ Load r3 with the return type code + ldr r2, [fp, #8] + ldr r3, [fp, #12] + + @ Deallocate the stack with the arguments. + mov sp, fp + cfi_def_cfa_register(sp) + + @ Store values stored in registers. 
+ .align 3 + add pc, pc, r3, lsl #3 + nop +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vstr s0, [r2] +#else + stc p10, cr0, [r2] @ vstr s0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vstr d0, [r2] +#else + stc p11, cr0, [r2] @ vstr d0, [r2] +#endif + pop {fp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vstm r2, {d0-d3} +#else + stc p11, cr0, [r2], {8} @ vstm r2, {d0-d3} +#endif + pop {fp,pc} +E(ARM_TYPE_INT64) + str r1, [r2, #4] + nop +E(ARM_TYPE_INT) + str r0, [r2] + pop {fp,pc} +E(ARM_TYPE_VOID) + pop {fp,pc} + nop +E(ARM_TYPE_STRUCT) + pop {fp,pc} + + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_call_SYSV) + + +/* + int ffi_closure_inner_* (cif, fun, user_data, frame) +*/ + +ARM_FUNC_START(ffi_go_closure_SYSV) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_SYSV) + +ARM_FUNC_START(ffi_closure_SYSV) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 @ compute entry sp + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) + stmdb sp!, {ip,lr} + + /* Remember that EABI unwind info only applies at call sites. + We need do nothing except note the save of the stack pointer + and the link registers. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_SYSV) + + @ Load values returned in registers. 
+ add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_SYSV) + +ARM_FUNC_START(ffi_go_closure_VFP) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + ldr r0, [ip, #4] @ load cif + ldr r1, [ip, #8] @ load fun + mov r2, ip @ load user_data + b 0f + cfi_endproc +ARM_FUNC_END(ffi_go_closure_VFP) + +ARM_FUNC_START(ffi_closure_VFP) + UNWIND(.fnstart) + cfi_startproc + stmdb sp!, {r0-r3} @ save argument regs + cfi_adjust_cfa_offset(16) + +#if FFI_EXEC_TRAMPOLINE_TABLE + ldr ip, [ip] @ ip points to the config page, dereference to get the ffi_closure* +#endif + ldr r0, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET] @ load cif + ldr r1, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+4] @ load fun + ldr r2, [ip, #FFI_TRAMPOLINE_CLOSURE_OFFSET+8] @ load user_data +0: + add ip, sp, #16 + sub sp, sp, #64+32 @ allocate frame + cfi_adjust_cfa_offset(64+32) +#ifdef __clang__ + vstm sp, {d0-d7} +#else + stc p11, cr0, [sp], {16} @ vstm sp, {d0-d7} +#endif + stmdb sp!, {ip,lr} + + /* See above. */ + UNWIND(.save {sp,lr}) + cfi_adjust_cfa_offset(8) + cfi_rel_offset(lr, 4) + + add r3, sp, #8 @ load frame + bl CNAME(ffi_closure_inner_VFP) + + @ Load values returned in registers. + add r2, sp, #8+64 @ load result + adr r3, CNAME(ffi_closure_ret) + add pc, r3, r0, lsl #3 + cfi_endproc + UNWIND(.fnend) +ARM_FUNC_END(ffi_closure_VFP) + +/* Load values returned in registers for both closure entry points. + Note that we use LDM with SP in the register set. This is deprecated + by ARM, but not yet unpredictable. 
*/ + +ARM_FUNC_START_LOCAL(ffi_closure_ret) + cfi_startproc + cfi_rel_offset(sp, 0) + cfi_rel_offset(lr, 4) +0: +E(ARM_TYPE_VFP_S) +#ifdef __clang__ + vldr s0, [r2] +#else + ldc p10, cr0, [r2] @ vldr s0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_D) +#ifdef __clang__ + vldr d0, [r2] +#else + ldc p11, cr0, [r2] @ vldr d0, [r2] +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_VFP_N) +#ifdef __clang__ + vldm r2, {d0-d3} +#else + ldc p11, cr0, [r2], {8} @ vldm r2, {d0-d3} +#endif + ldm sp, {sp,pc} +E(ARM_TYPE_INT64) + ldr r1, [r2, #4] + nop +E(ARM_TYPE_INT) + ldr r0, [r2] + ldm sp, {sp,pc} +E(ARM_TYPE_VOID) + ldm sp, {sp,pc} + nop +E(ARM_TYPE_STRUCT) + ldm sp, {sp,pc} + cfi_endproc +ARM_FUNC_END(ffi_closure_ret) + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#include + +.align PAGE_MAX_SHIFT +ARM_FUNC_START(ffi_closure_trampoline_table_page) +.rept PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE + adr ip, #-PAGE_MAX_SIZE @ the config page is PAGE_MAX_SIZE behind the trampoline page + sub ip, #8 @ account for pc bias + ldr pc, [ip, #4] @ jump to ffi_closure_SYSV or ffi_closure_VFP +.endr +ARM_FUNC_END(ffi_closure_trampoline_table_page) +#endif + +#else + +ARM_FUNC_START(ffi_arm_trampoline) +0: adr ip, 0b + ldr pc, 1f +1: .long 0 +ARM_FUNC_END(ffi_arm_trampoline) + +#endif /* FFI_EXEC_TRAMPOLINE_TABLE */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",%progbits +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/closures.c b/module/src/main/cpp/whale/src/libffi/closures.c new file mode 100644 index 00000000..15e6e0f0 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/closures.c @@ -0,0 +1,966 @@ +/* ----------------------------------------------------------------------- + closures.c - Copyright (c) 2007, 2009, 2010 Red Hat, Inc. + Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc + Copyright (c) 2011 Plausible Labs Cooperative, Inc. + + Code to allocate and deallocate memory for closures. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#if defined __linux__ && !defined _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif + +#include +#include +#include + +#ifdef __NetBSD__ +#include +#endif + +#if __NetBSD_Version__ - 0 >= 799007200 +/* NetBSD with PROT_MPROTECT */ +#include + +#include +#include + +static const size_t overhead = + (sizeof(max_align_t) > sizeof(void *) + sizeof(size_t)) ? + sizeof(max_align_t) + : sizeof(void *) + sizeof(size_t); + +#define ADD_TO_POINTER(p, d) ((void *)((uintptr_t)(p) + (d))) + +void * +ffi_closure_alloc (size_t size, void **code) +{ + static size_t page_size; + size_t rounded_size; + void *codeseg, *dataseg; + int prot; + + /* Expect that PAX mprotect is active and a separate code mapping is necessary. */ + if (!code) + return NULL; + + /* Obtain system page size. 
*/ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + /* Round allocation size up to the next page, keeping in mind the size field and pointer to code map. */ + rounded_size = (size + overhead + page_size - 1) & ~(page_size - 1); + + /* Primary mapping is RW, but request permission to switch to PROT_EXEC later. */ + prot = PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC); + dataseg = mmap(NULL, rounded_size, prot, MAP_ANON | MAP_PRIVATE, -1, 0); + if (dataseg == MAP_FAILED) + return NULL; + + /* Create secondary mapping and switch it to RX. */ + codeseg = mremap(dataseg, rounded_size, NULL, rounded_size, MAP_REMAPDUP); + if (codeseg == MAP_FAILED) { + munmap(dataseg, rounded_size); + return NULL; + } + if (mprotect(codeseg, rounded_size, PROT_READ | PROT_EXEC) == -1) { + munmap(codeseg, rounded_size); + munmap(dataseg, rounded_size); + return NULL; + } + + /* Remember allocation size and location of the secondary mapping for ffi_closure_free. */ + memcpy(dataseg, &rounded_size, sizeof(rounded_size)); + memcpy(ADD_TO_POINTER(dataseg, sizeof(size_t)), &codeseg, sizeof(void *)); + *code = ADD_TO_POINTER(codeseg, overhead); + return ADD_TO_POINTER(dataseg, overhead); +} + +void +ffi_closure_free (void *ptr) +{ + void *codeseg, *dataseg; + size_t rounded_size; + + dataseg = ADD_TO_POINTER(ptr, -overhead); + memcpy(&rounded_size, dataseg, sizeof(rounded_size)); + memcpy(&codeseg, ADD_TO_POINTER(dataseg, sizeof(size_t)), sizeof(void *)); + munmap(dataseg, rounded_size); + munmap(codeseg, rounded_size); +} +#else /* !NetBSD with PROT_MPROTECT */ + +#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE +# if __linux__ && !defined(__ANDROID__) +/* This macro indicates it may be forbidden to map anonymous memory + with both write and execute permission. 
Code compiled when this + option is defined will attempt to map such pages once, but if it + fails, it falls back to creating a temporary file in a writable and + executable filesystem and mapping pages from it into separate + locations in the virtual memory space, one location writable and + another executable. */ +# define FFI_MMAP_EXEC_WRIT 1 +# define HAVE_MNTENT 1 +# endif +# if defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__) +/* Windows systems may have Data Execution Protection (DEP) enabled, + which requires the use of VirtualMalloc/VirtualFree to alloc/free + executable memory. */ +# define FFI_MMAP_EXEC_WRIT 1 +# endif +#endif + +#if FFI_MMAP_EXEC_WRIT && !defined FFI_MMAP_EXEC_SELINUX +# if defined(__linux__) && !defined(__ANDROID__) +/* When defined to 1 check for SELinux and if SELinux is active, + don't attempt PROT_EXEC|PROT_WRITE mapping at all, as that + might cause audit messages. */ +# define FFI_MMAP_EXEC_SELINUX 1 +# endif +#endif + +#if FFI_CLOSURES + +#if FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ + +#include +#include +#include +#include + +extern void *ffi_closure_trampoline_table_page; + +typedef struct ffi_trampoline_table ffi_trampoline_table; +typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry; + +struct ffi_trampoline_table +{ + /* contiguous writable and executable pages */ + vm_address_t config_page; + vm_address_t trampoline_page; + + /* free list tracking */ + uint16_t free_count; + ffi_trampoline_table_entry *free_list; + ffi_trampoline_table_entry *free_list_pool; + + ffi_trampoline_table *prev; + ffi_trampoline_table *next; +}; + +struct ffi_trampoline_table_entry +{ + void *(*trampoline) (); + ffi_trampoline_table_entry *next; +}; + +/* Total number of trampolines that fit in one trampoline table */ +#define FFI_TRAMPOLINE_COUNT (PAGE_MAX_SIZE / FFI_TRAMPOLINE_SIZE) + +static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER; +static ffi_trampoline_table *ffi_trampoline_tables = 
NULL; + +static ffi_trampoline_table * +ffi_trampoline_table_alloc (void) +{ + ffi_trampoline_table *table; + vm_address_t config_page; + vm_address_t trampoline_page; + vm_address_t trampoline_page_template; + vm_prot_t cur_prot; + vm_prot_t max_prot; + kern_return_t kt; + uint16_t i; + + /* Allocate two pages -- a config page and a placeholder page */ + config_page = 0x0; + kt = vm_allocate (mach_task_self (), &config_page, PAGE_MAX_SIZE * 2, + VM_FLAGS_ANYWHERE); + if (kt != KERN_SUCCESS) + return NULL; + + /* Remap the trampoline table on top of the placeholder page */ + trampoline_page = config_page + PAGE_MAX_SIZE; + trampoline_page_template = (vm_address_t)&ffi_closure_trampoline_table_page; +#ifdef __arm__ + /* ffi_closure_trampoline_table_page can be thumb-biased on some ARM archs */ + trampoline_page_template &= ~1UL; +#endif + kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_MAX_SIZE, 0x0, + VM_FLAGS_OVERWRITE, mach_task_self (), trampoline_page_template, + FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE); + if (kt != KERN_SUCCESS) + { + vm_deallocate (mach_task_self (), config_page, PAGE_MAX_SIZE * 2); + return NULL; + } + + /* We have valid trampoline and config pages */ + table = calloc (1, sizeof (ffi_trampoline_table)); + table->free_count = FFI_TRAMPOLINE_COUNT; + table->config_page = config_page; + table->trampoline_page = trampoline_page; + + /* Create and initialize the free list */ + table->free_list_pool = + calloc (FFI_TRAMPOLINE_COUNT, sizeof (ffi_trampoline_table_entry)); + + for (i = 0; i < table->free_count; i++) + { + ffi_trampoline_table_entry *entry = &table->free_list_pool[i]; + entry->trampoline = + (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE)); + + if (i < table->free_count - 1) + entry->next = &table->free_list_pool[i + 1]; + } + + table->free_list = table->free_list_pool; + + return table; +} + +static void +ffi_trampoline_table_free (ffi_trampoline_table *table) +{ + /* Remove from the list */ + if 
(table->prev != NULL) + table->prev->next = table->next; + + if (table->next != NULL) + table->next->prev = table->prev; + + /* Deallocate pages */ + vm_deallocate (mach_task_self (), table->config_page, PAGE_MAX_SIZE * 2); + + /* Deallocate free list */ + free (table->free_list_pool); + free (table); +} + +void * +ffi_closure_alloc (size_t size, void **code) +{ + /* Create the closure */ + ffi_closure *closure = malloc (size); + if (closure == NULL) + return NULL; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Check for an active trampoline table with available entries. */ + ffi_trampoline_table *table = ffi_trampoline_tables; + if (table == NULL || table->free_list == NULL) + { + table = ffi_trampoline_table_alloc (); + if (table == NULL) + { + pthread_mutex_unlock (&ffi_trampoline_lock); + free (closure); + return NULL; + } + + /* Insert the new table at the top of the list */ + table->next = ffi_trampoline_tables; + if (table->next != NULL) + table->next->prev = table; + + ffi_trampoline_tables = table; + } + + /* Claim the free entry */ + ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list; + ffi_trampoline_tables->free_list = entry->next; + ffi_trampoline_tables->free_count--; + entry->next = NULL; + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Initialize the return values */ + *code = entry->trampoline; + closure->trampoline_table = table; + closure->trampoline_table_entry = entry; + + return closure; +} + +void +ffi_closure_free (void *ptr) +{ + ffi_closure *closure = ptr; + + pthread_mutex_lock (&ffi_trampoline_lock); + + /* Fetch the table and entry references */ + ffi_trampoline_table *table = closure->trampoline_table; + ffi_trampoline_table_entry *entry = closure->trampoline_table_entry; + + /* Return the entry to the free list */ + entry->next = table->free_list; + table->free_list = entry; + table->free_count++; + + /* If all trampolines within this table are free, and at least one other table exists, deallocate + * 
the table */ + if (table->free_count == FFI_TRAMPOLINE_COUNT + && ffi_trampoline_tables != table) + { + ffi_trampoline_table_free (table); + } + else if (ffi_trampoline_tables != table) + { + /* Otherwise, bump this table to the top of the list */ + table->prev = NULL; + table->next = ffi_trampoline_tables; + if (ffi_trampoline_tables != NULL) + ffi_trampoline_tables->prev = table; + + ffi_trampoline_tables = table; + } + + pthread_mutex_unlock (&ffi_trampoline_lock); + + /* Free the closure */ + free (closure); +} + +#endif + +// Per-target implementation; It's unclear what can reasonable be shared between two OS/architecture implementations. + +#elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */ + +#define USE_LOCKS 1 +#define USE_DL_PREFIX 1 +#ifdef __GNUC__ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 1 +#endif +#endif + +/* We need to use mmap, not sbrk. */ +#define HAVE_MORECORE 0 + +/* We could, in theory, support mremap, but it wouldn't buy us anything. */ +#define HAVE_MREMAP 0 + +/* We have no use for this, so save some code and data. */ +#define NO_MALLINFO 1 + +/* We need all allocations to be in regular segments, otherwise we + lose track of the corresponding code address. */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T + +/* Don't allocate more than a page unless needed. */ +#define DEFAULT_GRANULARITY ((size_t)malloc_getpagesize) + +#include +#include +#include +#include +#ifndef _MSC_VER +#include +#endif +#include +#include +#if !defined(X86_WIN32) && !defined(X86_WIN64) +#ifdef HAVE_MNTENT +#include +#endif /* HAVE_MNTENT */ +#include +#include + +/* We don't want sys/mman.h to be included after we redefine mmap and + dlmunmap. 
*/ +#include +#define LACKS_SYS_MMAN_H 1 + +#if FFI_MMAP_EXEC_SELINUX +#include +#include + +static int selinux_enabled = -1; + +static int +selinux_enabled_check (void) +{ + struct statfs sfs; + FILE *f; + char *buf = NULL; + size_t len = 0; + + if (statfs ("/selinux", &sfs) >= 0 + && (unsigned int) sfs.f_type == 0xf97cff8cU) + return 1; + f = fopen ("/proc/mounts", "r"); + if (f == NULL) + return 0; + while (getline (&buf, &len, f) >= 0) + { + char *p = strchr (buf, ' '); + if (p == NULL) + break; + p = strchr (p + 1, ' '); + if (p == NULL) + break; + if (strncmp (p + 1, "selinuxfs ", 10) == 0) + { + free (buf); + fclose (f); + return 1; + } + } + free (buf); + fclose (f); + return 0; +} + +#define is_selinux_enabled() (selinux_enabled >= 0 ? selinux_enabled \ + : (selinux_enabled = selinux_enabled_check ())) + +#else + +#define is_selinux_enabled() 0 + +#endif /* !FFI_MMAP_EXEC_SELINUX */ + +/* On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC. */ +#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX +#include + +static int emutramp_enabled = -1; + +static int +emutramp_enabled_check (void) +{ + char *buf = NULL; + size_t len = 0; + FILE *f; + int ret; + f = fopen ("/proc/self/status", "r"); + if (f == NULL) + return 0; + ret = 0; + + while (getline (&buf, &len, f) != -1) + if (!strncmp (buf, "PaX:", 4)) + { + char emutramp; + if (sscanf (buf, "%*s %*c%c", &emutramp) == 1) + ret = (emutramp == 'E'); + break; + } + free (buf); + fclose (f); + return ret; +} + +#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \ + : (emutramp_enabled = emutramp_enabled_check ())) +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +#elif defined (__CYGWIN__) || defined(__INTERIX) + +#include + +/* Cygwin is Linux-like, but not quite that Linux-like. 
*/ +#define is_selinux_enabled() 0 + +#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */ + +#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX +#define is_emutramp_enabled() 0 +#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Declare all functions defined in dlmalloc.c as static. */ +static void *dlmalloc(size_t); +static void dlfree(void*); +static void *dlcalloc(size_t, size_t) MAYBE_UNUSED; +static void *dlrealloc(void *, size_t) MAYBE_UNUSED; +static void *dlmemalign(size_t, size_t) MAYBE_UNUSED; +static void *dlvalloc(size_t) MAYBE_UNUSED; +static int dlmallopt(int, int) MAYBE_UNUSED; +static size_t dlmalloc_footprint(void) MAYBE_UNUSED; +static size_t dlmalloc_max_footprint(void) MAYBE_UNUSED; +static void** dlindependent_calloc(size_t, size_t, void**) MAYBE_UNUSED; +static void** dlindependent_comalloc(size_t, size_t*, void**) MAYBE_UNUSED; +static void *dlpvalloc(size_t) MAYBE_UNUSED; +static int dlmalloc_trim(size_t) MAYBE_UNUSED; +static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED; +static void dlmalloc_stats(void) MAYBE_UNUSED; + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) +/* Use these for mmap and munmap within dlmalloc.c. */ +static void *dlmmap(void *, size_t, int, int, int, off_t); +static int dlmunmap(void *, size_t); +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +#define mmap dlmmap +#define munmap dlmunmap + +#include "dlmalloc.c" + +#undef mmap +#undef munmap + +#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) + +/* A mutex used to synchronize access to *exec* variables in this file. */ +static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* A file descriptor of a temporary file from which we'll map + executable pages. */ +static int execfd = -1; + +/* The amount of space already allocated from the temporary file. 
*/ +static size_t execsize = 0; + +/* Open a temporary file name, and immediately unlink it. */ +static int +open_temp_exec_file_name (char *name, int flags) +{ + int fd; + +#ifdef HAVE_MKOSTEMP + fd = mkostemp (name, flags); +#else + fd = mkstemp (name); +#endif + + if (fd != -1) + unlink (name); + + return fd; +} + +/* Open a temporary file in the named directory. */ +static int +open_temp_exec_file_dir (const char *dir) +{ + static const char suffix[] = "/ffiXXXXXX"; + int lendir, flags; + char *tempname; +#ifdef O_TMPFILE + int fd; +#endif + +#ifdef O_CLOEXEC + flags = O_CLOEXEC; +#else + flags = 0; +#endif + +#ifdef O_TMPFILE + fd = open (dir, flags | O_RDWR | O_EXCL | O_TMPFILE, 0700); + /* If the running system does not support the O_TMPFILE flag then retry without it. */ + if (fd != -1 || (errno != EINVAL && errno != EISDIR && errno != EOPNOTSUPP)) { + return fd; + } else { + errno = 0; + } +#endif + + lendir = (int) strlen (dir); + tempname = __builtin_alloca (lendir + sizeof (suffix)); + + if (!tempname) + return -1; + + memcpy (tempname, dir, lendir); + memcpy (tempname + lendir, suffix, sizeof (suffix)); + + return open_temp_exec_file_name (tempname, flags); +} + +/* Open a temporary file in the directory in the named environment + variable. */ +static int +open_temp_exec_file_env (const char *envvar) +{ + const char *value = getenv (envvar); + + if (!value) + return -1; + + return open_temp_exec_file_dir (value); +} + +#ifdef HAVE_MNTENT +/* Open a temporary file in an executable and writable mount point + listed in the mounts file. Subsequent calls with the same mounts + keep searching for mount points in the same file. Providing NULL + as the mounts file closes the file. 
*/ +static int +open_temp_exec_file_mnt (const char *mounts) +{ + static const char *last_mounts; + static FILE *last_mntent; + + if (mounts != last_mounts) + { + if (last_mntent) + endmntent (last_mntent); + + last_mounts = mounts; + + if (mounts) + last_mntent = setmntent (mounts, "r"); + else + last_mntent = NULL; + } + + if (!last_mntent) + return -1; + + for (;;) + { + int fd; + struct mntent mnt; + char buf[MAXPATHLEN * 3]; + + if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL) + return -1; + + if (hasmntopt (&mnt, "ro") + || hasmntopt (&mnt, "noexec") + || access (mnt.mnt_dir, W_OK)) + continue; + + fd = open_temp_exec_file_dir (mnt.mnt_dir); + + if (fd != -1) + return fd; + } +} +#endif /* HAVE_MNTENT */ + +/* Instructions to look for a location to hold a temporary file that + can be mapped in for execution. */ +static struct +{ + int (*func)(const char *); + const char *arg; + int repeat; +} open_temp_exec_file_opts[] = { + { open_temp_exec_file_env, "TMPDIR", 0 }, + { open_temp_exec_file_dir, "/tmp", 0 }, + { open_temp_exec_file_dir, "/var/tmp", 0 }, + { open_temp_exec_file_dir, "/dev/shm", 0 }, + { open_temp_exec_file_env, "HOME", 0 }, +#ifdef HAVE_MNTENT + { open_temp_exec_file_mnt, "/etc/mtab", 1 }, + { open_temp_exec_file_mnt, "/proc/mounts", 1 }, +#endif /* HAVE_MNTENT */ +}; + +/* Current index into open_temp_exec_file_opts. */ +static int open_temp_exec_file_opts_idx = 0; + +/* Reset a current multi-call func, then advances to the next entry. + If we're at the last, go back to the first and return nonzero, + otherwise return zero. 
*/ +static int +open_temp_exec_file_opts_next (void) +{ + if (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func (NULL); + + open_temp_exec_file_opts_idx++; + if (open_temp_exec_file_opts_idx + == (sizeof (open_temp_exec_file_opts) + / sizeof (*open_temp_exec_file_opts))) + { + open_temp_exec_file_opts_idx = 0; + return 1; + } + + return 0; +} + +/* Return a file descriptor of a temporary zero-sized file in a + writable and executable filesystem. */ +static int +open_temp_exec_file (void) +{ + int fd; + + do + { + fd = open_temp_exec_file_opts[open_temp_exec_file_opts_idx].func + (open_temp_exec_file_opts[open_temp_exec_file_opts_idx].arg); + + if (!open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat + || fd == -1) + { + if (open_temp_exec_file_opts_next ()) + break; + } + } + while (fd == -1); + + return fd; +} + +/* We need to allocate space in a file that will be backing a writable + mapping. Several problems exist with the usual approaches: + - fallocate() is Linux-only + - posix_fallocate() is not available on all platforms + - ftruncate() does not allocate space on filesystems with sparse files + Failure to allocate the space will cause SIGBUS to be thrown when + the mapping is subsequently written to. */ +static int +allocate_space (int fd, off_t offset, off_t len) +{ + static size_t page_size; + + /* Obtain system page size. */ + if (!page_size) + page_size = sysconf(_SC_PAGESIZE); + + unsigned char buf[page_size]; + memset (buf, 0, page_size); + + while (len > 0) + { + off_t to_write = (len < page_size) ? len : page_size; + if (write (fd, buf, to_write) < to_write) + return -1; + len -= to_write; + } + + return 0; +} + +/* Map in a chunk of memory from the temporary exec file into separate + locations in the virtual memory address space, one writable and one + executable. 
Returns the address of the writable portion, after + storing an offset to the corresponding executable portion at the + last word of the requested chunk. */ +static void * +dlmmap_locked (void *start, size_t length, int prot, int flags, off_t offset) +{ + void *ptr; + + if (execfd == -1) + { + open_temp_exec_file_opts_idx = 0; + retry_open: + execfd = open_temp_exec_file (); + if (execfd == -1) + return MFAIL; + } + + offset = execsize; + + if (allocate_space (execfd, offset, length)) + return MFAIL; + + flags &= ~(MAP_PRIVATE | MAP_ANONYMOUS); + flags |= MAP_SHARED; + + ptr = mmap (NULL, length, (prot & ~PROT_WRITE) | PROT_EXEC, + flags, execfd, offset); + if (ptr == MFAIL) + { + if (!offset) + { + close (execfd); + goto retry_open; + } + ftruncate (execfd, offset); + return MFAIL; + } + else if (!offset + && open_temp_exec_file_opts[open_temp_exec_file_opts_idx].repeat) + open_temp_exec_file_opts_next (); + + start = mmap (start, length, prot, flags, execfd, offset); + + if (start == MFAIL) + { + munmap (ptr, length); + ftruncate (execfd, offset); + return start; + } + + mmap_exec_offset ((char *)start, length) = (char*)ptr - (char*)start; + + execsize += length; + + return start; +} + +/* Map in a writable and executable chunk of memory if possible. + Failing that, fall back to dlmmap_locked. */ +static void * +dlmmap (void *start, size_t length, int prot, + int flags, int fd, off_t offset) +{ + void *ptr; + + assert (start == NULL && length % malloc_getpagesize == 0 + && prot == (PROT_READ | PROT_WRITE) + && flags == (MAP_PRIVATE | MAP_ANONYMOUS) + && fd == -1 && offset == 0); + + if (execfd == -1 && is_emutramp_enabled ()) + { + ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset); + return ptr; + } + + if (execfd == -1 && !is_selinux_enabled ()) + { + ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset); + + if (ptr != MFAIL || (errno != EPERM && errno != EACCES)) + /* Cool, no need to mess with separate segments. 
*/ + return ptr; + + /* If MREMAP_DUP is ever introduced and implemented, try mmap + with ((prot & ~PROT_WRITE) | PROT_EXEC) and mremap with + MREMAP_DUP and prot at this point. */ + } + + if (execsize == 0 || execfd == -1) + { + pthread_mutex_lock (&open_temp_exec_file_mutex); + ptr = dlmmap_locked (start, length, prot, flags, offset); + pthread_mutex_unlock (&open_temp_exec_file_mutex); + + return ptr; + } + + return dlmmap_locked (start, length, prot, flags, offset); +} + +/* Release memory at the given address, as well as the corresponding + executable page if it's separate. */ +static int +dlmunmap (void *start, size_t length) +{ + /* We don't bother decreasing execsize or truncating the file, since + we can't quite tell whether we're unmapping the end of the file. + We don't expect frequent deallocation anyway. If we did, we + could locate pages in the file by writing to the pages being + deallocated and checking that the file contents change. + Yuck. */ + msegmentptr seg = segment_holding (gm, start); + void *code; + + if (seg && (code = add_segment_exec_offset (start, seg)) != start) + { + int ret = munmap (code, length); + if (ret) + return ret; + } + + return munmap (start, length); +} + +#if FFI_CLOSURE_FREE_CODE +/* Return segment holding given code address. */ +static msegmentptr +segment_holding_code (mstate m, char* addr) +{ + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= add_segment_exec_offset (sp->base, sp) + && addr < add_segment_exec_offset (sp->base, sp) + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} +#endif + +#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */ + +/* Allocate a chunk of memory with the given size. Returns a pointer + to the writable address, and sets *CODE to the executable + corresponding virtual address. 
*/ +void * +ffi_closure_alloc (size_t size, void **code) +{ + void *ptr; + + if (!code) + return NULL; + + ptr = dlmalloc (size); + + if (ptr) + { + msegmentptr seg = segment_holding (gm, ptr); + + *code = add_segment_exec_offset (ptr, seg); + } + + return ptr; +} + +/* Release a chunk of memory allocated with ffi_closure_alloc. If + FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the + writable or the executable address given. Otherwise, only the + writable address can be provided here. */ +void +ffi_closure_free (void *ptr) +{ +#if FFI_CLOSURE_FREE_CODE + msegmentptr seg = segment_holding_code (gm, ptr); + + if (seg) + ptr = sub_segment_exec_offset (ptr, seg); +#endif + + dlfree (ptr); +} + +# else /* ! FFI_MMAP_EXEC_WRIT */ + +/* On many systems, memory returned by malloc is writable and + executable, so just use it. */ + +#include + +void * +ffi_closure_alloc (size_t size, void **code) +{ + if (!code) + return NULL; + + return *code = malloc (size); +} + +void +ffi_closure_free (void *ptr) +{ + free (ptr); +} + +# endif /* ! FFI_MMAP_EXEC_WRIT */ +#endif /* FFI_CLOSURES */ + +#endif /* NetBSD with PROT_MPROTECT */ diff --git a/module/src/main/cpp/whale/src/libffi/debug.c b/module/src/main/cpp/whale/src/libffi/debug.c new file mode 100644 index 00000000..f3172b1e --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/debug.c @@ -0,0 +1,64 @@ +/* ----------------------------------------------------------------------- + debug.c - Copyright (c) 1996 Red Hat, Inc. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include +#include + +/* General debugging routines */ + +void ffi_stop_here(void) +{ + /* This function is only useful for debugging purposes. + Place a breakpoint on ffi_stop_here to be notified of + significant events. 
*/ +} + +/* This function should only be called via the FFI_ASSERT() macro */ + +void ffi_assert(char *expr, char *file, int line) +{ + fprintf(stderr, "ASSERTION FAILURE: %s at %s:%d\n", expr, file, line); + ffi_stop_here(); + abort(); +} + +/* Perform a sanity check on an ffi_type structure */ + +void ffi_type_test(ffi_type *a, char *file, int line) +{ + FFI_ASSERT_AT(a != NULL, file, line); + + FFI_ASSERT_AT(a->type <= FFI_TYPE_LAST, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->size > 0, file, line); + FFI_ASSERT_AT(a->type == FFI_TYPE_VOID || a->alignment > 0, file, line); + FFI_ASSERT_AT((a->type != FFI_TYPE_STRUCT && a->type != FFI_TYPE_COMPLEX) + || a->elements != NULL, file, line); + FFI_ASSERT_AT(a->type != FFI_TYPE_COMPLEX + || (a->elements != NULL + && a->elements[0] != NULL && a->elements[1] == NULL), + file, line); + +} diff --git a/module/src/main/cpp/whale/src/libffi/dlmalloc.c b/module/src/main/cpp/whale/src/libffi/dlmalloc.c new file mode 100644 index 00000000..f0c7698b --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/dlmalloc.c @@ -0,0 +1,5169 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/licenses/publicdomain. Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + +* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. 
+ + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) + + Alignment: 8 bytes (default) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. 
+ The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. + + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. (Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. 
In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with either a pthread mutex or a win32 + spinlock (depending on WIN32). This is not especially fast, and + can be a major bottleneck. It is designed only to provide + minimal protection in concurrent environments, and to provide a + basis for extensions. If you are using malloc in a concurrent + program, consider instead using ptmalloc, which is derived from + a version of this malloc. (See http://www.malloc.de). + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. 
+ +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) + + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. 
+ + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... + static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. + + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. 
+ +MALLOC_ALIGNMENT default: (size_t)8 + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. + +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. (If set true, this can be + overridden on a per-mspace basis for mspace versions.) + +FOOTERS default: 0 + If true, provide extra checking and dispatching by placing + information in the footers of allocated chunks. This adds + space and time overhead. + +INSECURE default: 0 + If true, omit checks for usage errors and heap space overwrites. + +USE_DL_PREFIX default: NOT defined + Causes compiler to prefix all public routines with the string 'dl'. + This can be useful when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. + +ABORT default: defined as abort() + Defines how to abort on failed checks. On most systems, a failed + check cannot die with an "assert" or even print an informative + message, because the underlying print routines in turn call malloc, + which will fail again. Generally, the best policy is to simply call + abort(). It's not very useful to do more than this because many + errors due to overwriting will show up as address faults (null, odd + addresses etc) rather than malloc-triggered checks, so will also + abort. Also, most compilers know that abort() does not return, so + can better optimize code conditionally calling it. 
+ +PROCEED_ON_ERROR default: defined as 0 (false) + Controls whether detected bad addresses cause them to be bypassed + rather than aborting. If set, detected bad arguments to free and + realloc are ignored. And all bookkeeping information is zeroed out + upon a detected overwrite of freed heap space, thus losing the + ability to ever return it from malloc again, but enabling the + application to proceed. If PROCEED_ON_ERROR is defined, the + static variable malloc_corruption_error_count is compiled in + and can be examined to see if errors have occurred. This option + generates slower code than the default abort policy. + +DEBUG default: NOT defined + The DEBUG setting is mainly intended for people trying to modify + this code or diagnose problems when porting to new platforms. + However, it may also be able to better isolate user errors than just + using runtime checks. The assertions in the check routines spell + out in more detail the assumptions and invariants underlying the + algorithms. The checking is fairly extensive, and will slow down + execution noticeably. Calling malloc_stats or mallinfo with DEBUG + set will attempt to check every non-mmapped allocated and free chunk + in the course of computing the summaries. + +ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) + Debugging assertion failures can be nearly impossible if your + version of the assert macro causes malloc to be called, which will + lead to a cascade of further failures, blowing the runtime stack. + ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(), + which will usually make debugging easier. + +MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 + The action to take before "return 0" when malloc fails to be able to + return memory because there is none available. + +HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES + True if this system supports sbrk or an emulation of it. 
+ +MORECORE default: sbrk + The name of the sbrk-style system routine to call to obtain more + memory. See below for guidance on writing custom MORECORE + functions. The type of the argument to sbrk/MORECORE varies across + systems. It cannot be size_t, because it supports negative + arguments, so it is normally the signed type of the same width as + size_t (sometimes declared as "intptr_t"). It doesn't much matter + though. Internally, we only call it with arguments less than half + the max value of a size_t, which should work across all reasonable + possibilities, although sometimes generating compiler warnings. See + near the end of this file for guidelines for creating a custom + version of MORECORE. + +MORECORE_CONTIGUOUS default: 1 (true) + If true, take advantage of fact that consecutive calls to MORECORE + with positive arguments always return contiguous increasing + addresses. This is true of unix sbrk. It does not hurt too much to + set it true anyway, since malloc copes with non-contiguities. + Setting it false when definitely non-contiguous saves time + and possibly wasted space it would take to discover this though. + +MORECORE_CANNOT_TRIM default: NOT defined + True if MORECORE cannot release space back to the system when given + negative arguments. This is generally necessary only if you are + using a hand-crafted MORECORE function that cannot handle negative + arguments. + +HAVE_MMAP default: 1 (true) + True if this system supports mmap or an emulation of it. If so, and + HAVE_MORECORE is not true, MMAP is used for all system + allocation. If set and HAVE_MORECORE is true as well, MMAP is + primarily used to directly allocate very large blocks. It is also + used as a backup strategy in cases where MORECORE fails to provide + space from system. Note: A single call to MUNMAP is assumed to be + able to unmap memory that may have be allocated using multiple calls + to MMAP, so long as they are adjacent. 
+ +HAVE_MREMAP default: 1 on linux, else 0 + If true realloc() uses mremap() to re-allocate large blocks and + extend or shrink allocation spaces. + +MMAP_CLEARS default: 1 on unix + True if mmap clears memory so calloc doesn't need to. This is true + for standard unix mmap using /dev/zero. + +USE_BUILTIN_FFS default: 0 (i.e., not used) + Causes malloc to use the builtin ffs() function to compute indices. + Some compilers may recognize and intrinsify ffs to be faster than the + supplied C version. Also, the case of x86 using gcc is special-cased + to an asm instruction, so is already as fast as it can be, and so + this setting has no effect. (On most x86s, the asm version is only + slightly faster than the C version.) + +malloc_getpagesize default: derive from system includes, or 4096. + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. This may be (and + usually is) a function rather than a constant. This is ignored + if WIN32, where page size is determined using getSystemInfo during + initialization. + +USE_DEV_RANDOM default: 0 (i.e., not used) + Causes malloc to use /dev/random to initialize secure magic seed for + stamping footers. Otherwise, the current time is used. + +NO_MALLINFO default: 0 + If defined, don't compile "mallinfo". This can be a simple way + of dealing with mismatches between system declarations and + those in this file. + +MALLINFO_FIELD_TYPE default: size_t + The type of the fields in the mallinfo struct. This was originally + defined as "int" in SVID etc, but is more usefully defined as + size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set + +REALLOC_ZERO_BYTES_FREES default: not defined + This should be set if a call to realloc with zero bytes should + be the same as a call to free. Some people think it should. Otherwise, + since this malloc returns a unique pointer for malloc(0), so does + realloc(p, 0). 
+ +LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H +LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H +LACKS_STDLIB_H default: NOT defined unless on WIN32 + Define these if your system does not have these header files. + You might need to manually insert some of the declarations they provide. + +DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, + system_info.dwAllocationGranularity in WIN32, + otherwise 64K. + Also settable using mallopt(M_GRANULARITY, x) + The unit for allocating and deallocating memory from the system. On + most systems with contiguous MORECORE, there is no reason to + make this more than a page. However, systems with MMAP tend to + either require or encourage larger granularities. You can increase + this value to prevent system allocation functions to be called so + often, especially if they are slow. The value must be at least one + page and must be a power of two. Setting to 0 causes initialization + to either page size or win32 region size. (Note: In previous + versions of malloc, the equivalent of this option was called + "TOP_PAD") + +DEFAULT_TRIM_THRESHOLD default: 2MB + Also settable using mallopt(M_TRIM_THRESHOLD, x) + The maximum amount of unused top-most memory to keep before + releasing via malloc_trim in free(). Automatic trimming is mainly + useful in long-lived programs using contiguous MORECORE. Because + trimming via sbrk can be slow on some systems, and can sometimes be + wasteful (in cases where programs immediately afterward allocate + more large chunks) the value should be high enough so that your + overall system performance would improve by releasing this much + memory. As a rough guide, you might set to a value close to the + average size of a process (program) running on your system. + Releasing this much memory would allow such a process to run in + memory. 
Generally, it is worth tuning trim thresholds when a + program undergoes phases where several large chunks are allocated + and released in ways that can reuse each other's storage, perhaps + mixed with phases where there are no such chunks at all. The trim + value must be greater than page size to have any useful effect. To + disable trimming completely, you can set to MAX_SIZE_T. Note that the trick + some people use of mallocing a huge space and then freeing it at + program startup, in an attempt to reserve system memory, doesn't + have the intended effect under automatic trimming, since that memory + will immediately be returned to the system. + +DEFAULT_MMAP_THRESHOLD default: 256K + Also settable using mallopt(M_MMAP_THRESHOLD, x) + The request size threshold for using MMAP to directly service a + request. Requests of at least this size that cannot be allocated + using already-existing space will be serviced via mmap. (If enough + normal freed space already exists it is used instead.) Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. 
The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. + +*/ + +#if defined __linux__ && !defined _GNU_SOURCE +/* mremap() on Linux requires this via sys/mman.h */ +#define _GNU_SOURCE 1 +#endif + +#ifndef WIN32 +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#endif /* WIN32 */ +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#include <windows.h> +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#define MALLOC_FAILURE_ACTION +#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */ +#endif /* WIN32 */ + +#ifdef __OS2__ +#define INCL_DOS +#include <os2.h> +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_SYS_MMAN_H +#endif /* __OS2__ */ + +#if defined(DARWIN) || defined(_DARWIN) +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ + +#ifndef LACKS_SYS_TYPES_H +#include <sys/types.h> /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ + +/* The maximum possible size_t value has all bits set */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)8U) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef 
PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ +#ifndef USE_LOCKS +#define USE_LOCKS 0 +#endif /* USE_LOCKS */ +#ifndef INSECURE +#define INSECURE 0 +#endif /* INSECURE */ +#ifndef HAVE_MMAP +#define HAVE_MMAP 1 +#endif /* HAVE_MMAP */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif /* MMAP_CLEARS */ +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else /* linux */ +#define HAVE_MREMAP 0 +#endif /* linux */ +#endif /* HAVE_MREMAP */ +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION errno = ENOMEM; +#endif /* MALLOC_FAILURE_ACTION */ +#ifndef HAVE_MORECORE +#if ONLY_MSPACES +#define HAVE_MORECORE 0 +#else /* ONLY_MSPACES */ +#define HAVE_MORECORE 1 +#endif /* ONLY_MSPACES */ +#endif /* HAVE_MORECORE */ +#if !HAVE_MORECORE +#define MORECORE_CONTIGUOUS 0 +#else /* !HAVE_MORECORE */ +#ifndef MORECORE +#define MORECORE sbrk +#endif /* MORECORE */ +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif /* MORECORE_CONTIGUOUS */ +#endif /* HAVE_MORECORE */ +#ifndef DEFAULT_GRANULARITY +#if MORECORE_CONTIGUOUS +#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ +#else /* MORECORE_CONTIGUOUS */ +#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) +#endif /* MORECORE_CONTIGUOUS */ +#endif /* DEFAULT_GRANULARITY */ +#ifndef DEFAULT_TRIM_THRESHOLD +#ifndef MORECORE_CANNOT_TRIM +#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#else /* MORECORE_CANNOT_TRIM */ +#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T +#endif /* MORECORE_CANNOT_TRIM */ +#endif /* DEFAULT_TRIM_THRESHOLD */ +#ifndef DEFAULT_MMAP_THRESHOLD +#if HAVE_MMAP +#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) +#else /* HAVE_MMAP */ +#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T +#endif /* HAVE_MMAP */ +#endif /* DEFAULT_MMAP_THRESHOLD */ +#ifndef USE_BUILTIN_FFS +#define USE_BUILTIN_FFS 0 +#endif /* USE_BUILTIN_FFS */ +#ifndef USE_DEV_RANDOM +#define USE_DEV_RANDOM 0 +#endif /* 
USE_DEV_RANDOM */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. +*/ + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + +/* ------------------------ Mallinfo declarations ------------------------ */ + +#if !NO_MALLINFO +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any system that has a + /usr/include/malloc.h defining struct mallinfo. The main + declaration needed is the mallinfo struct that is returned (by-copy) + by mallinfo(). The mallinfo struct contains a bunch of fields that + are not even meaningful in this version of malloc. These fields are + instead filled by mallinfo() with other numbers that might be of + interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else a compliant version is + declared below. These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. 
+*/ +#ifndef __APPLE__ +#define HAVE_USR_INCLUDE_MALLOC_H +#endif +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ + +/* HP-UX's stdlib.h redefines mallinfo unless _STRUCT_MALLINFO is defined */ +#define _STRUCT_MALLINFO + +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; + +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define USE_DL_PREFIX +#endif +/* NOTE(review): USE_DL_PREFIX is forced on above, so the alias block below is dead code — the dl* names are never remapped to the plain malloc/free/... names. Presumably intentional, to keep this allocator from shadowing the system malloc in this module; confirm. */ +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlrealloc realloc +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. 
+ + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cause the current program to abort. +*/ +void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. realloc with a size + argument of zero (re)allocates a minimum-sized chunk. + + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. 
+*/ + +void* dlrealloc(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +void* dlmemalign(size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +int dlmallopt(int, int); + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. 
+ Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. + fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. 
+*/ +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use regular calloc and assign pointers into this + space to represent elements. (In this case though, you cannot + independently free elements.) + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. 
For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0)); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. + + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use a single regular malloc, and assign pointers at + particular offsets in the aggregate space. (In this case though, you + cannot independently free elements.) 
+ + independent_comalloc differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +void** dlindependent_comalloc(size_t, size_t*, void**); + + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. 
If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. 
+*/ +void dlmalloc_stats(void); + +#endif /* ONLY_MSPACES */ + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_malloc behaves as malloc, but operates within + the given space. +*/ +void* mspace_malloc(mspace msp, size_t bytes); + +/* + mspace_free behaves as free, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_free is not actually needed. 
+ free may be called instead of mspace_free because freed chunks from + any space are handled by their originating spaces. +*/ +void mspace_free(mspace msp, void* mem); + +/* + mspace_realloc behaves as realloc, but operates within + the given space. + + If compiled with FOOTERS==1, mspace_realloc is not actually + needed. realloc may be called instead of mspace_realloc because + realloced chunks from any space are handled by their originating + spaces. +*/ +void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +/* + mspace_calloc behaves as calloc, but operates within + the given space. +*/ +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + +/* + mspace_memalign behaves as memalign, but operates within + the given space. +*/ +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + +/* + mspace_independent_calloc behaves as independent_calloc, but + operates within the given space. +*/ +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + +/* + mspace_independent_comalloc behaves as independent_comalloc, but + operates within the given space. +*/ +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + +/* + mspace_footprint() returns the number of bytes obtained from the + system for this space. +*/ +size_t mspace_footprint(mspace msp); + +/* + mspace_max_footprint() returns the peak number of bytes obtained from the + system for this space. +*/ +size_t mspace_max_footprint(mspace msp); + + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + mspace_malloc_stats behaves as malloc_stats, but reports + properties of the given space. +*/ +void mspace_malloc_stats(mspace msp); + +/* + mspace_trim behaves as malloc_trim, but + operates within the given space. 
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+  An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+};  /* end of extern "C" */
+#endif /* __cplusplus */
+
+/*
+  ========================================================================
+  To make a fully customizable malloc.h header file, cut everything
+  above this line, put into file malloc.h, edit to suit, and #include it
+  on the next line, as well as in programs that use this malloc.
+  ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* _MSC_VER */
+
+#include <stdio.h>       /* for printing in malloc_stats */
+
+#ifndef LACKS_ERRNO_H
+#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#if FOOTERS
+#include <time.h>        /* for magic initialization */
+#endif /* FOOTERS */
+#ifndef LACKS_STDLIB_H
+#include <stdlib.h>      /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else /* DEBUG */
+#define assert(x)
+#endif /* DEBUG */
+#ifndef LACKS_STRING_H
+#include <string.h>      /* for memset etc */
+#endif /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include <strings.h>     /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h>    /* for mmap */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#if HAVE_MORECORE
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>     /* for sbrk */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void* sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+#endif /* HAVE_MMAP */
+
+#ifndef WIN32 +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef LACKS_SYS_PARAM_H +# include +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif +#endif + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & 
CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if !HAVE_MMAP +#define IS_MMAPPED_BIT (SIZE_T_ZERO) +#define USE_MMAP_BIT (SIZE_T_ZERO) +#define CALL_MMAP(s) MFAIL +#define CALL_MUNMAP(a, s) (-1) +#define DIRECT_MMAP(s) MFAIL + +#else /* HAVE_MMAP */ +#define IS_MMAPPED_BIT (SIZE_T_ONE) +#define USE_MMAP_BIT (SIZE_T_ONE) + +#if !defined(WIN32) && !defined (__OS2__) +#define CALL_MUNMAP(a, s) munmap((a), (s)) +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define CALL_MMAP(s) ((dev_zero_fd < 0) ? 
\ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP(s) CALL_MMAP(s) + +#elif defined(__OS2__) + +/* OS/2 MMAP via DosAllocMem */ +static void* os2mmap(size_t size) { + void* ptr; + if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) && + DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE)) + return MFAIL; + return ptr; +} + +#define os2direct_mmap(n) os2mmap(n) + +/* This function supports releasing coalesed segments */ +static int os2munmap(void* ptr, size_t size) { + while (size) { + ULONG ulSize = size; + ULONG ulFlags = 0; + if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0) + return -1; + if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 || + ulSize > size) + return -1; + if (DosFreeMem(ptr) != 0) + return -1; + ptr = ( void * ) ( ( char * ) ptr + ulSize ); + size -= ulSize; + } + return 0; +} + +#define CALL_MMAP(s) os2mmap(s) +#define CALL_MUNMAP(a, s) os2munmap((a), (s)) +#define DIRECT_MMAP(s) os2direct_mmap(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static void* win32mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE); + return (ptr != 0)? ptr: MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static void* win32direct_mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_EXECUTE_READWRITE); + return (ptr != 0)? 
ptr: MFAIL; +} + +/* This function supports releasing coalesed segments */ +static int win32munmap(void* ptr, size_t size) { + MEMORY_BASIC_INFORMATION minfo; + char* cptr = ptr; + while (size) { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + return 0; +} + +#define CALL_MMAP(s) win32mmap(s) +#define CALL_MUNMAP(a, s) win32munmap((a), (s)) +#define DIRECT_MMAP(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* HAVE_MMAP */ + +#if HAVE_MMAP && HAVE_MREMAP +#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) +#else /* HAVE_MMAP && HAVE_MREMAP */ +#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL +#endif /* HAVE_MMAP && HAVE_MREMAP */ + +#if HAVE_MORECORE +#define CALL_MORECORE(S) MORECORE(S) +#else /* HAVE_MORECORE */ +#define CALL_MORECORE(S) MFAIL +#endif /* HAVE_MORECORE */ + +/* mstate bit set if contiguous morecore disabled or failed */ +#define USE_NONCONTIGUOUS_BIT (4U) + +/* segment bit set in create_mspace_with_base */ +#define EXTERN_BIT (8U) + + +/* --------------------------- Lock preliminaries ------------------------ */ + +#if USE_LOCKS + +/* + When locks are defined, there are up to two global locks: + + * If HAVE_MORECORE, morecore_mutex protects sequences of calls to + MORECORE. In many cases sys_alloc requires two calls, that should + not be interleaved with calls by other threads. This does not + protect against direct calls to MORECORE by other threads not + using this lock, so there is still code to cope the best we can on + interference. + + * magic_init_mutex ensures that mparams.magic and other + unique mparams values are initialized only once. 
+*/
+
+#if !defined(WIN32) && !defined(__OS2__)
+/* By default use posix locks */
+#include <pthread.h>
+#define MLOCK_T pthread_mutex_t
+#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
+#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
+#define RELEASE_LOCK(l) pthread_mutex_unlock(l)
+
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif /* HAVE_MORECORE */
+
+static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#elif defined(__OS2__)
+#define MLOCK_T HMTX
+#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE)
+#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
+#define RELEASE_LOCK(l) DosReleaseMutexSem(*l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+
+#else /* WIN32 */
+/*
+  Because lock-protected regions have bounded times, and there
+  are no recursive lock calls, we can use simple spinlocks.
+*/
+
+#define MLOCK_T long
+static int win32_acquire_lock (MLOCK_T *sl) {
+  for (;;) {
+#ifdef InterlockedCompareExchangePointer
+    if (!InterlockedCompareExchange(sl, 1, 0))
+      return 0;
+#else /* Use older void* version */
+    if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
+      return 0;
+#endif /* InterlockedCompareExchangePointer */
+    Sleep (0);
+  }
+}
+
+static void win32_release_lock (MLOCK_T *sl) {
+  InterlockedExchange (sl, 0);
+}
+
+#define INITIAL_LOCK(l) *(l)=0
+#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
+#define RELEASE_LOCK(l) win32_release_lock(l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+#endif /* WIN32 */
+
+#define USE_LOCK_BIT (2U)
+#else /* USE_LOCKS */
+#define USE_LOCK_BIT (0U)
+#define INITIAL_LOCK(l)
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS && HAVE_MORECORE
+#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
+#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
+#else /* USE_LOCKS && HAVE_MORECORE */
+#define 
ACQUIRE_MORECORE_LOCK() +#define RELEASE_MORECORE_LOCK() +#endif /* USE_LOCKS && HAVE_MORECORE */ + +#if USE_LOCKS +#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex); +#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MAGIC_INIT_LOCK() +#define RELEASE_MAGIC_INIT_LOCK() +#endif /* USE_LOCKS */ + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. 
+ + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. 
+
+  Given a pointer to a chunk (which can be derived trivially from the
+  payload pointer) we can, in O(1) time, find out whether the adjacent
+  chunks are free, and if so, unlink them from the lists that they
+  are on and merge them with the current chunk.
+
+  Chunks always begin on even word boundaries, so the mem portion
+  (which is returned to the user) is also on an even word boundary, and
+  thus at least double-word aligned.
+
+  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
+  chunk size (which is always a multiple of two words), is an in-use
+  bit for the *previous* chunk.  If that bit is *clear*, then the
+  word before the current chunk size contains the previous chunk
+  size, and can be used to find the front of the previous chunk.
+  The very first chunk allocated always has this bit set, preventing
+  access to non-existent (or non-owned) memory. If pinuse is set for
+  any given chunk, then you CANNOT determine the size of the
+  previous chunk, and might even get a memory addressing fault when
+  trying to do so.
+
+  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
+  the chunk size redundantly records whether the current chunk is
+  inuse. This redundancy enables usage checks within free and realloc,
+  and reduces indirection when freeing and consolidating chunks.
+
+  Each freshly allocated chunk must have both cinuse and pinuse set.
+  That is, each allocated chunk borders either a previously allocated
+  and still in-use chunk, or the base of its memory arena. This is
+  ensured by making all allocations from the `lowest' part of any
+  found chunk.  Further, no free chunk physically borders another one,
+  so each free chunk is known to be preceded and followed by either
+  inuse chunks or the ends of memory.
+
+  Note that the `foot' of the current chunk is actually represented
+  as the prev_foot of the NEXT chunk. 
This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. The top chunk + doesn't update its trailing size field since there is no next + contiguous chunk that would have to index off it. However, + space is still allocated for it (TOP_FOOT_SIZE) to enable + separation or merging when space is extended. + + 3. Chunks allocated via mmap, which have the lowest-order bit + (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set + PINUSE_BIT in their head fields. Because they are allocated + one-by-one, each must carry its own prev_foot field, which is + also used to hold the offset this chunk has within its mmapped + region, which is needed to preserve alignment. Each mmapped + chunk is trailed by the first two fields of a fake next-chunk + for sake of usage checks. + +*/ + +struct malloc_chunk { + size_t prev_foot; /* Size of previous chunk (if free). */ + size_t head; /* Size and inuse bits. */ + struct malloc_chunk* fd; /* double links -- used only if free. 
*/ + struct malloc_chunk* bk; +}; + +typedef struct malloc_chunk mchunk; +typedef struct malloc_chunk* mchunkptr; +typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ +typedef size_t bindex_t; /* Described below */ +typedef unsigned int binmap_t; /* Described below */ +typedef unsigned int flag_t; /* The type of various bit flag sets */ + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#define MCHUNK_SIZE (sizeof(mchunk)) + +#if FOOTERS +#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +#else /* FOOTERS */ +#define CHUNK_OVERHEAD (SIZE_T_SIZE) +#endif /* FOOTERS */ + +/* MMapped chunks need a second word of overhead ... */ +#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) +/* ... and additional padding for fake next-chunk at foot */ +#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) + +/* The smallest size we can malloc is an aligned minimal chunk */ +#define MIN_CHUNK_SIZE\ + ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* conversion from malloc headers to user pointers, and back */ +#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) +/* chunk associated with aligned address A */ +#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) + +/* Bounds on request (not chunk) sizes. */ +#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) +#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) + +/* pad request bytes into a usable size */ +#define pad_request(req) \ + (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) + +/* pad request, checking for minimum (but not maximum) */ +#define request2size(req) \ + (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) + + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use. 
If the chunk was obtained with mmap, the prev_foot field has + IS_MMAPPED_BIT set, otherwise holding the offset of the base of the + mmapped region to the base of the chunk. +*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(INUSE_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_mmapped(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? 
MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. 
This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. +*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? 
(t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) 
+ * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. +*/ + +struct malloc_segment { + char* base; /* base address */ + size_t size; /* allocated size */ + struct malloc_segment* next; /* ptr to next segment */ +#if FFI_MMAP_EXEC_WRIT + /* The mmap magic is supposed to store the address of the executable + segment at the very end of the requested block. */ + +# define mmap_exec_offset(b,s) (*(ptrdiff_t*)((b)+(s)-sizeof(ptrdiff_t))) + + /* We can only merge segments if their corresponding executable + segments are at identical offsets. */ +# define check_segment_merge(S,b,s) \ + (mmap_exec_offset((b),(s)) == (S)->exec_offset) + +# define add_segment_exec_offset(p,S) ((char*)(p) + (S)->exec_offset) +# define sub_segment_exec_offset(p,S) ((char*)(p) - (S)->exec_offset) + + /* The removal of sflags only works with HAVE_MORECORE == 0. */ + +# define get_segment_flags(S) (IS_MMAPPED_BIT) +# define set_segment_flags(S,v) \ + (((v) != IS_MMAPPED_BIT) ? 
(ABORT, (v)) : \ + (((S)->exec_offset = \ + mmap_exec_offset((S)->base, (S)->size)), \ + (mmap_exec_offset((S)->base + (S)->exec_offset, (S)->size) != \ + (S)->exec_offset) ? (ABORT, (v)) : \ + (mmap_exec_offset((S)->base, (S)->size) = 0), (v))) + + /* We use an offset here, instead of a pointer, because then, when + base changes, we don't have to modify this. On architectures + with segmented addresses, this might not work. */ + ptrdiff_t exec_offset; +#else + +# define get_segment_flags(S) ((S)->sflags) +# define set_segment_flags(S,v) ((S)->sflags = (v)) +# define check_segment_merge(S,b,s) (1) + + flag_t sflags; /* mmap and extern flag */ +#endif +}; + +#define is_mmapped_segment(S) (get_segment_flags(S) & IS_MMAPPED_BIT) +#define is_extern_segment(S) (get_segment_flags(S) & EXTERN_BIT) + +typedef struct malloc_segment msegment; +typedef struct malloc_segment* msegmentptr; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. 
To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap"). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless INSECURE is defined). + + Magic tag + A cross-check field that should always hold the same value as mparams.magic. + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. 
+ + Locking + If USE_LOCKS is defined, the "mutex" lock is acquired and released + around every public call using this mspace. +*/ + +/* Bin types, widths and sizes */ +#define NSMALLBINS (32U) +#define NTREEBINS (32U) +#define SMALLBIN_SHIFT (3U) +#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) +#define TREEBIN_SHIFT (8U) +#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) +#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) +#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) + +struct malloc_state { + binmap_t smallmap; + binmap_t treemap; + size_t dvsize; + size_t topsize; + char* least_addr; + mchunkptr dv; + mchunkptr top; + size_t trim_check; + size_t magic; + mchunkptr smallbins[(NSMALLBINS+1)*2]; + tbinptr treebins[NTREEBINS]; + size_t footprint; + size_t max_footprint; + flag_t mflags; +#if USE_LOCKS + MLOCK_T mutex; /* locate lock among fields that rarely change */ +#endif /* USE_LOCKS */ + msegment seg; +}; + +typedef struct malloc_state* mstate; + +/* ------------- Global malloc_state and malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. 
+*/ + +struct malloc_params { + size_t magic; + size_t page_size; + size_t granularity; + size_t mmap_threshold; + size_t trim_threshold; + flag_t default_mflags; +}; + +static struct malloc_params mparams; + +/* The global malloc_state used for all non-"mspace" calls */ +static struct malloc_state _gm_; +#define gm (&_gm_) +#define is_global(M) ((M) == &_gm_) +#define is_initialized(M) ((M)->top != 0) + +/* -------------------------- system alloc setup ------------------------- */ + +/* Operations on mflags */ + +#define use_lock(M) ((M)->mflags & USE_LOCK_BIT) +#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) +#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) + +#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) +#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) +#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) + +#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) +#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) + +#define set_lock(M,L)\ + ((M)->mflags = (L)?\ + ((M)->mflags | USE_LOCK_BIT) :\ + ((M)->mflags & ~USE_LOCK_BIT)) + +/* page-align a size */ +#define page_align(S)\ + (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE)) + +/* granularity-align a size */ +#define granularity_align(S)\ + (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE)) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int 
has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +/* Ensure locks are initialized */ +#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams()) + +#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. + USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. 
+*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + 
+#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I */ +#if defined(__GNUC__) && defined(__i386__) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) + +/* Shift placing maximum resolved bit in a treebin at i as sign bit */ +#define leftshift_for_tree_index(i) \ + ((i == NTREEBINS-1)? 
0 : \ + ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) + +/* The size of the smallest chunk held in bin with index i */ +#define minsize_for_tree_index(i) \ + ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ + (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) + + +/* ------------------------ Operations on bin maps ----------------------- */ + +/* bit corresponding to given index */ +#define idx2bit(i) ((binmap_t)(1) << (i)) + +/* Mark/Clear bits with given index */ +#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) +#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) +#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) + +#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) +#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) +#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) + +/* index corresponding to given bit */ + +#if defined(__GNUC__) && defined(__i386__) +#define compute_bit2idx(X, I)\ +{\ + unsigned int J;\ + __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\ + I = (bindex_t)J;\ +} + +#else /* GNUC */ +#if USE_BUILTIN_FFS +#define compute_bit2idx(X, I) I = ffs(X)-1 + +#else /* USE_BUILTIN_FFS */ +#define compute_bit2idx(X, I)\ +{\ + unsigned int Y = X - 1;\ + unsigned int K = Y >> (16-4) & 16;\ + unsigned int N = K; Y >>= K;\ + N += K = Y >> (8-3) & 8; Y >>= K;\ + N += K = Y >> (4-2) & 4; Y >>= K;\ + N += K = Y >> (2-1) & 2; Y >>= K;\ + N += K = Y >> (1-0) & 1; Y >>= K;\ + I = (bindex_t)(N + Y);\ +} +#endif /* USE_BUILTIN_FFS */ +#endif /* GNUC */ + +/* isolate the least set bit of a bitmap */ +#define least_bit(x) ((x) & -(x)) + +/* mask with all bits to left of least bit of x on */ +#define left_bits(x) ((x<<1) | -(x<<1)) + +/* mask with all bits to left of or equal to least bit of x on */ +#define same_or_left_bits(x) ((x) | -(x)) + + +/* ----------------------- Runtime Check Support ------------------------- */ + +/* + For security, the main invariant is that malloc/free/etc 
never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used to guarantee + that the mstate controlling malloc/free is intact. This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probabilistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. 
+*/ + +#if !INSECURE +/* Check if address a is at least as high as any from MORECORE or MMAP */ +#define ok_address(M, a) ((char*)(a) >= (M)->least_addr) +/* Check if address of next chunk n is higher than base chunk p */ +#define ok_next(p, n) ((char*)(p) < (char*)(n)) +/* Check if p has its cinuse bit on */ +#define ok_cinuse(p) cinuse(p) +/* Check if p has its pinuse bit on */ +#define ok_pinuse(p) pinuse(p) + +#else /* !INSECURE */ +#define ok_address(M, a) (1) +#define ok_next(b, n) (1) +#define ok_cinuse(p) (1) +#define ok_pinuse(p) (1) +#endif /* !INSECURE */ + +#if (FOOTERS && !INSECURE) +/* Check if (alleged) mstate m has expected magic field */ +#define ok_magic(M) ((M)->magic == mparams.magic) +#else /* (FOOTERS && !INSECURE) */ +#define ok_magic(M) (1) +#endif /* (FOOTERS && !INSECURE) */ + + +/* In gcc, use __builtin_expect to minimize impact of checks */ +#if !INSECURE +#if defined(__GNUC__) && __GNUC__ >= 3 +#define RTCHECK(e) __builtin_expect(e, 1) +#else /* GNUC */ +#define RTCHECK(e) (e) +#endif /* GNUC */ +#else /* !INSECURE */ +#define RTCHECK(e) (1) +#endif /* !INSECURE */ + +/* macros to set up inuse chunks with or without footers */ + +#if !FOOTERS + +#define mark_inuse_foot(M,p,s) + +/* Set cinuse bit and pinuse bit of next chunk */ +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set cinuse and pinuse of this chunk and pinuse of next chunk */ +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) + +/* Set size, cinuse and pinuse bit of this chunk */ +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) + +#else /* FOOTERS */ + +/* Set foot of inuse chunk to be xor of mstate and seed */ +#define mark_inuse_foot(M,p,s)\ + (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) + +#define 
get_mstate_for(p)\ + ((mstate)(((mchunkptr)((char*)(p) +\ + (chunksize(p))))->prev_foot ^ mparams.magic)) + +#define set_inuse(M,p,s)\ + ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ + mark_inuse_foot(M,p,s)) + +#define set_inuse_and_pinuse(M,p,s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ + mark_inuse_foot(M,p,s)) + +#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ + ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ + mark_inuse_foot(M, p, s)) + +#endif /* !FOOTERS */ + +/* ---------------------------- setting mparams -------------------------- */ + +/* Initialize mparams */ +static int init_mparams(void) { + if (mparams.page_size == 0) { + size_t s; + + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + s = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ + s = (size_t)(time(0) ^ (size_t)0x55555555U); + + s |= (size_t)8U; /* ensure nonzero */ + s &= ~(size_t)7U; /* improve chances of fault for bad values */ + + } +#else /* (FOOTERS && !INSECURE) */ + s = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + ACQUIRE_MAGIC_INIT_LOCK(); + if (mparams.magic == 0) { + mparams.magic = s; + /* Set up lock for main malloc area */ + INITIAL_LOCK(&gm->mutex); + gm->mflags = mparams.default_mflags; + } + RELEASE_MAGIC_INIT_LOCK(); + +#if !defined(WIN32) && !defined(__OS2__) + mparams.page_size = 
malloc_getpagesize; + mparams.granularity = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : mparams.page_size); +#elif defined (__OS2__) + /* if low-memory is used, os2munmap() would break + if it were anything other than 64k */ + mparams.page_size = 4096u; + mparams.granularity = 65536u; +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + mparams.page_size = system_info.dwPageSize; + mparams.granularity = system_info.dwAllocationGranularity; + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. + */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) || + ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0)) + ABORT; + } + return 0; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (size_t)value; + init_mparams(); + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void 
do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = chunksize(p); + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!next_pinuse(p)); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(cinuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!cinuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || cinuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == 
SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. */ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!cinuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. 
*/ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. */ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + 
assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + assert(m->topsize == chunksize(m->top)); + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + 
nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. 
+*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert(is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else 
{\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendants + correspond properly to bit masks. We use the rightmost descendant + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). 
+*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F))) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, 
b)\ + (m == gm)? dlmalloc(b) : mspace_malloc(m, b) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). There is also enough space + allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain + the PINUSE bit so frees can be checked. +*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_MMAPPED_BIT; + (p)->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + 
SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES + + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, 1); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; 
+ m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. */ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmapped); + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + + init_mparams(); + + /* Directly map large chunks */ + if (use_mmap(m) && nb >= mparams.mmap_threshold) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. + (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + or main space is mmapped or a previous contiguous call failed) + 2. A call to MMAP new space (disabled if not HAVE_MMAP). 
+ Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + size_t asize = 0; + ACQUIRE_MORECORE_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + asize += (page_align((size_t)base) - (size_t)base); + /* Can't call MORECORE if size is negative when treated as signed */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == base) { + tbase = base; + tsize = asize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. 
*/ + asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE); + /* Use mem here only if it did continuously extend old space */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { + tbase = br; + tsize = asize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (asize < HALF_MAX_SIZE_T && + asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) { + size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + asize += esize; + else { /* Can't use; try to release */ + (void)CALL_MORECORE(-asize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = asize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MORECORE_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE; + size_t rsize = granularity_align(req); + if (rsize > nb) { /* Fail if wraps around zero */ + char* mp = (char*)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + mmap_flag = IS_MMAPPED_BIT; + } + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE); + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MORECORE_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MORECORE_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + 
m->seg.base = m->least_addr = tbase; + m->seg.size = tsize; + (void)set_segment_flags(&m->seg, mmap_flag); + m->magic = mparams.magic; + init_bins(m); + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + while (sp != 0 && tbase != sp->base + sp->size) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + check_segment_merge(sp, tbase, tsize) && + (get_segment_flags(sp) & IS_MMAPPED_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp 
!= 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + pred = sp; + sp = next; + } + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if (HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MORECORE_LOCK(); + { + /* Make sure end of memory is where we last set it. 
*/ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MORECORE_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 1 : 0; +} + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ 
+ size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + 
mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (is_mmapped(oldp)) + newp = mmap_resize(m, oldp, nb); + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, oldmem, (oc < bytes)? 
oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. + */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
+ br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = (newsize|CINUSE_BIT); + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + trailer = chunk2mem(remainder); + } + } + + assert (chunksize(p) >= nb); + assert((((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ------------------------ comalloc/coalloc support --------------------- */ + +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + /* + This provides common support for independent_X routines, handling + all of the combinations that can result. 
+ + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed + */ + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + + +/* -------------------------- public routines ---------------------------- */ + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. 
+ (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } 
+#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* dlrealloc(void* oldmem, size_t bytes) { + if (oldmem == 0) + return dlmalloc(bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + dlfree(oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if ! FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(mem2chunk(oldmem)); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + return internal_realloc(m, oldmem, bytes); + } +} + +void* dlmemalign(size_t alignment, size_t bytes) { + return internal_memalign(gm, alignment, bytes); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + init_mparams(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +int dlmalloc_trim(size_t pad) { + int result = 0; + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} 
+ +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +void dlmalloc_stats() { + internal_malloc_stats(gm); +} + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* !ONLY_MSPACES */ + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->mflags = mparams.default_mflags; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + set_segment_flags(&m->seg, IS_MMAPPED_BIT); + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize = pad_request(sizeof(struct malloc_state)); + init_mparams(); /* Ensure pagesize etc initialized */ + + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + set_segment_flags(&m->seg, EXTERN_BIT); + set_lock(m, locked); + } + return (mspace)m; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = get_segment_flags(sp); + sp = sp->next; + if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if 
(RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + insert_chunk(fm, p, psize); + check_free_chunk(fm, p); + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if (oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* 
REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + USAGE_ERROR_ACTION(ms,ms); + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if 
(!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. + * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. 
+ + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) + * Add max_footprint functions + * Ensure all appropriate literals are size_t + * Fix conditional compilation problem for some #define settings + * Avoid concatenating segments with the one provided + in create_mspace_with_base + * Rename some variables to avoid compiler shadowing warnings + * Use explicit lock initialization. + * Better handling of sbrk interference. + * Simplify and fix segment insertion, trimming and mspace_destroy + * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x + * Thanks especially to Dennis Flanagan for help on these. + + V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) + * Fix memalign brace error. 
+
+ V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
+ * Fix improper #endif nesting in C++
+ * Add explicit casts needed for C++
+
+ V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
+ * Use trees for large bins
+ * Support mspaces
+ * Use segments to unify sbrk-based and mmap-based system allocation,
+ removing need for emulation on most platforms without sbrk.
+ * Default safety checks
+ * Optional footer checks. Thanks to William Robertson for the idea.
+ * Internal code refactoring
+ * Incorporate suggestions and platform-specific changes.
+ Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
+ Aaron Bachmann, Emery Berger, and others.
+ * Speed up non-fastbin processing enough to remove fastbins.
+ * Remove useless cfree() to avoid conflicts with other apps.
+ * Remove internal memcpy, memset. Compilers handle builtins better.
+ * Remove some options that no one ever used and rename others.
+
+ V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+ * Fix malloc_state bitmap array misdeclaration
+
+ V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
+ * Allow tuning of FIRST_SORTED_BIN_SIZE
+ * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
+ * Better detection and support for non-contiguousness of MORECORE.
+ Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
+ * Bypass most of malloc if no frees. Thanks to Emery Berger.
+ * Fix freeing of old top non-contiguous chunk in sysmalloc.
+ * Raised default trim and map thresholds to 256K.
+ * Fix mmap-related #defines. Thanks to Lubos Lunak.
+ * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
+ * Branch-free bin calculation
+ * Default trim and mmap thresholds now 256K.
+
+ V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
+ * Introduce independent_comalloc and independent_calloc.
+ Thanks to Michael Pachos for motivation and help.
+ * Make optional .h file available
+ * Allow > 2GB requests on 32bit systems.
+ * new WIN32 sbrk, mmap, munmap, lock code from . + Thanks also to Andreas Mueller , + and Anonymous. + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for + helping test this.) + * memalign: check alignment arg + * realloc: don't try to shift chunks backwards, since this + leads to more fragmentation in some programs and doesn't + seem to help in any others. + * Collect all cases in malloc requiring system memory into sysmalloc + * Use mmap as backup to sbrk + * Place all internal state in malloc_state + * Introduce fastbins (although similar to 2.5.1) + * Many minor tunings and cosmetic improvements + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS + Thanks to Tony E. Bennett and others. + * Include errno.h to support default failure action. + + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. 
Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. + * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occurring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. 
Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). + + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) 
+ +*/ diff --git a/module/src/main/cpp/whale/src/libffi/ffi.h b/module/src/main/cpp/whale/src/libffi/ffi.h new file mode 100644 index 00000000..9f939b4b --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/ffi.h @@ -0,0 +1,24 @@ +#ifdef __aarch64__ + +#include + + +#endif +#ifdef __i386__ + +#include + + +#endif +#ifdef __arm__ + +#include + + +#endif +#ifdef __x86_64__ + +#include + + +#endif diff --git a/module/src/main/cpp/whale/src/libffi/ffi_cfi.h b/module/src/main/cpp/whale/src/libffi/ffi_cfi.h new file mode 100644 index 00000000..244ce572 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/ffi_cfi.h @@ -0,0 +1,55 @@ +/* ----------------------------------------------------------------------- + ffi_cfi.h - Copyright (c) 2014 Red Hat, Inc. + + Conditionally assemble cfi directives. Only necessary for building libffi. + ----------------------------------------------------------------------- */ + +#ifndef FFI_CFI_H +#define FFI_CFI_H + +#ifdef HAVE_AS_CFI_PSEUDO_OP + +# define cfi_startproc .cfi_startproc +# define cfi_endproc .cfi_endproc +# define cfi_def_cfa(reg, off) .cfi_def_cfa reg, off +# define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg +# define cfi_def_cfa_offset(off) .cfi_def_cfa_offset off +# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +# define cfi_offset(reg, off) .cfi_offset reg, off +# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +# define cfi_register(r1, r2) .cfi_register r1, r2 +# define cfi_return_column(reg) .cfi_return_column reg +# define cfi_restore(reg) .cfi_restore reg +# define cfi_same_value(reg) .cfi_same_value reg +# define cfi_undefined(reg) .cfi_undefined reg +# define cfi_remember_state .cfi_remember_state +# define cfi_restore_state .cfi_restore_state +# define cfi_window_save .cfi_window_save +# define cfi_personality(enc, exp) .cfi_personality enc, exp +# define cfi_lsda(enc, exp) .cfi_lsda enc, exp +# define cfi_escape(...) 
.cfi_escape __VA_ARGS__ + +#else + +# define cfi_startproc +# define cfi_endproc +# define cfi_def_cfa(reg, off) +# define cfi_def_cfa_register(reg) +# define cfi_def_cfa_offset(off) +# define cfi_adjust_cfa_offset(off) +# define cfi_offset(reg, off) +# define cfi_rel_offset(reg, off) +# define cfi_register(r1, r2) +# define cfi_return_column(reg) +# define cfi_restore(reg) +# define cfi_same_value(reg) +# define cfi_undefined(reg) +# define cfi_remember_state +# define cfi_restore_state +# define cfi_window_save +# define cfi_personality(enc, exp) +# define cfi_lsda(enc, exp) +# define cfi_escape(...) + +#endif /* HAVE_AS_CFI_PSEUDO_OP */ +#endif /* FFI_CFI_H */ diff --git a/module/src/main/cpp/whale/src/libffi/ffi_common.h b/module/src/main/cpp/whale/src/libffi/ffi_common.h new file mode 100644 index 00000000..ee9cdcb6 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/ffi_common.h @@ -0,0 +1,149 @@ +/* ----------------------------------------------------------------------- + ffi_common.h - Copyright (C) 2011, 2012, 2013 Anthony Green + Copyright (C) 2007 Free Software Foundation, Inc + Copyright (c) 1996 Red Hat, Inc. + + Common internal definitions and macros. Only necessary for building + libffi. + ----------------------------------------------------------------------- */ + +#ifndef FFI_COMMON_H +#define FFI_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* Do not move this. Some versions of AIX are very picky about where + this is positioned. */ +#ifdef __GNUC__ +# if HAVE_ALLOCA_H +# include +# else + /* mingw64 defines this already in malloc.h. 
*/ +# ifndef alloca +# define alloca __builtin_alloca +# endif +# endif +# define MAYBE_UNUSED __attribute__((__unused__)) +#else +# define MAYBE_UNUSED +# if HAVE_ALLOCA_H +# include +# else +# ifdef _AIX +# pragma alloca +# else +# ifndef alloca /* predefined by HP cc +Olibcalls */ +# ifdef _MSC_VER +# define alloca _alloca +# else +char *alloca (); +# endif +# endif +# endif +# endif +#endif + +/* Check for the existence of memcpy. */ +#if STDC_HEADERS +# include +#else +# ifndef HAVE_MEMCPY +# define memcpy(d, s, n) bcopy ((s), (d), (n)) +# endif +#endif + +#if defined(FFI_DEBUG) +#include +#endif + +#ifdef FFI_DEBUG +void ffi_assert(char *expr, char *file, int line); +void ffi_stop_here(void); +void ffi_type_test(ffi_type *a, char *file, int line); + +#define FFI_ASSERT(x) ((x) ? (void)0 : ffi_assert(#x, __FILE__,__LINE__)) +#define FFI_ASSERT_AT(x, f, l) ((x) ? 0 : ffi_assert(#x, (f), (l))) +#define FFI_ASSERT_VALID_TYPE(x) ffi_type_test (x, __FILE__, __LINE__) +#else +#define FFI_ASSERT(x) +#define FFI_ASSERT_AT(x, f, l) +#define FFI_ASSERT_VALID_TYPE(x) +#endif + +/* v cast to size_t and aligned up to a multiple of a */ +#define FFI_ALIGN(v, a) (((((size_t) (v))-1) | ((a)-1))+1) +/* v cast to size_t and aligned down to a multiple of a */ +#define FFI_ALIGN_DOWN(v, a) (((size_t) (v)) & -a) + +/* Perform machine dependent cif processing */ +ffi_status ffi_prep_cif_machdep(ffi_cif *cif); +ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, + unsigned int nfixedargs, unsigned int ntotalargs); + + +#if HAVE_LONG_DOUBLE_VARIANT +/* Used to adjust size/alignment of ffi types. 
*/ +void ffi_prep_types (ffi_abi abi); +#endif + +/* Used internally, but overridden by some architectures */ +ffi_status ffi_prep_cif_core(ffi_cif *cif, + ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +/* Extended cif, used in callback from assembly routine */ +typedef struct +{ + ffi_cif *cif; + void *rvalue; + void **avalue; +} extended_cif; + +/* Terse sized type definitions. */ +#if defined(_MSC_VER) || defined(__sgi) || defined(__SUNPRO_C) +typedef unsigned char UINT8; +typedef signed char SINT8; +typedef unsigned short UINT16; +typedef signed short SINT16; +typedef unsigned int UINT32; +typedef signed int SINT32; +# ifdef _MSC_VER +typedef unsigned __int64 UINT64; +typedef signed __int64 SINT64; +# else +# include +typedef uint64_t UINT64; +typedef int64_t SINT64; +# endif +#else +typedef unsigned int UINT8 __attribute__((__mode__(__QI__))); +typedef signed int SINT8 __attribute__((__mode__(__QI__))); +typedef unsigned int UINT16 __attribute__((__mode__(__HI__))); +typedef signed int SINT16 __attribute__((__mode__(__HI__))); +typedef unsigned int UINT32 __attribute__((__mode__(__SI__))); +typedef signed int SINT32 __attribute__((__mode__(__SI__))); +typedef unsigned int UINT64 __attribute__((__mode__(__DI__))); +typedef signed int SINT64 __attribute__((__mode__(__DI__))); +#endif + +typedef float FLOAT32; + +#ifndef __GNUC__ +#define __builtin_expect(x, expected_value) (x) +#endif +#define LIKELY(x) __builtin_expect(!!(x),1) +#define UNLIKELY(x) __builtin_expect((x)!=0,0) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/module/src/main/cpp/whale/src/libffi/ffi_cxx.cc b/module/src/main/cpp/whale/src/libffi/ffi_cxx.cc new file mode 100644 index 00000000..6f463d7d --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/ffi_cxx.cc @@ -0,0 +1,68 @@ +#include "ffi_cxx.h" + +FFICallInterface::~FFICallInterface() { + for (FFIClosure *closure : closures_) { + delete 
closure; + } + delete cif_; + delete types_; +} + + +void FFIDispatcher(ffi_cif *cif OPTION, void *ret, void **args, void *userdata) { + FFIClosure *closure = reinterpret_cast(userdata); + FFICallback callback = closure->GetCallback(); + callback(closure, ret, args, closure->GetUserData()); +} + + +FFIClosure *FFICallInterface::CreateClosure(void *userdata, FFICallback callback) { + std::lock_guard guard(lock_); + FFIClosure *closure = new FFIClosure(this, userdata, callback); + ffi_prep_closure_loc(closure->closure_, cif_, FFIDispatcher, closure, closure->code_); + closures_.push_back(closure); + return closure; +} + +static ffi_type *FFIGetCType(FFIType type) { + switch (type) { + case FFIType::kFFITypeVoid: + return &ffi_type_void; + case FFIType::kFFITypeU1: + return &ffi_type_uint8; + case FFIType::kFFITypeU2: + return &ffi_type_uint16; + case FFIType::kFFITypeU4: + return &ffi_type_uint32; + case FFIType::kFFITypeU8: + return &ffi_type_uint64; + case FFIType::kFFITypeS1: + return &ffi_type_sint8; + case FFIType::kFFITypeS2: + return &ffi_type_sint16; + case FFIType::kFFITypeS4: + return &ffi_type_sint32; + case FFIType::kFFITypeS8: + return &ffi_type_sint64; + case FFIType::kFFITypePointer: + return &ffi_type_pointer; + case FFIType::kFFITypeFloat: + return &ffi_type_float; + case FFIType::kFFITypeDouble: + return &ffi_type_double; + } +} + +FFICallInterface *FFICallInterface::FinalizeCif() { + cif_ = new ffi_cif; + types_ = new ffi_type *[parameters_.size()]; + int idx = 0; + for (FFIType type : parameters_) { + types_[idx] = FFIGetCType(type); + idx++; + } + ffi_prep_cif(cif_, FFI_DEFAULT_ABI, + (unsigned int) parameters_.size(), FFIGetCType(return_type_), types_); + return this; +} + diff --git a/module/src/main/cpp/whale/src/libffi/ffi_cxx.h b/module/src/main/cpp/whale/src/libffi/ffi_cxx.h new file mode 100644 index 00000000..002d7d38 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/ffi_cxx.h @@ -0,0 +1,117 @@ +#ifndef WHALE_FFI_CXX_H_ +#define 
WHALE_FFI_CXX_H_ + +#include +#include +#include +#include "ffi.h" +#include "base/macros.h" + +enum class FFIType { + kFFITypeVoid, + kFFITypeU1, + kFFITypeU2, + kFFITypeU4, + kFFITypeU8, + kFFITypeS1, + kFFITypeS2, + kFFITypeS4, + kFFITypeS8, + kFFITypePointer, + kFFITypeFloat, + kFFITypeDouble, +}; + +class FFICallInterface; + +class FFIClosure; + +typedef void (*FFICallback)(FFIClosure *closure, void *ret, void **args, void *userdata); + +class FFIClosure { + public: + FFIClosure(FFICallInterface *cif, void *userdata, FFICallback callback) : cif_(cif), + userdata_(userdata), + callback_(callback) { + closure_ = reinterpret_cast(ffi_closure_alloc(sizeof(ffi_closure), &code_)); + } + + ~FFIClosure() { + ffi_closure_free(closure_); + } + + void *GetCode() { + return code_; + } + + void *GetUserData() { + return userdata_; + } + + FFICallInterface *GetCif() { + return cif_; + } + + FFICallback GetCallback() { + return callback_; + } + + private: + friend class FFICallInterface; + + FFICallInterface *cif_; + ffi_closure *closure_; + FFICallback callback_; + void *code_; + void *userdata_; + +}; + +class FFICallInterface { + public: + FFICallInterface(const FFIType return_type) : return_type_(return_type) {} + + ~FFICallInterface(); + + FFICallInterface *Parameter(const FFIType parameter) { + parameters_.push_back(parameter); + return this; + } + + FFICallInterface *Parameters(unsigned int count, ...) 
{ + va_list ap; + va_start(ap, count); + while (count-- > 0) { + Parameter(va_arg(ap, FFIType)); + } + va_end(ap); + return this; + } + + FFICallInterface *FinalizeCif(); + + size_t GetParameterCount() { + return parameters_.size(); + } + + std::list GetParameters() { + return parameters_; + } + + FFIClosure *CreateClosure(void *userdata, FFICallback callback); + + void RemoveClosure(FFIClosure *closure) { + std::lock_guard guard(lock_); + closures_.remove(closure); + } + + private: + std::mutex lock_; + ffi_cif *cif_; + ffi_type **types_; + std::list parameters_; + const FFIType return_type_; + std::list closures_; +}; + +#endif //WHALE_FFI_CXX_H_ diff --git a/module/src/main/cpp/whale/src/libffi/fficonfig.h b/module/src/main/cpp/whale/src/libffi/fficonfig.h new file mode 100644 index 00000000..cc2ec1b7 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/fficonfig.h @@ -0,0 +1,21 @@ +#if defined(__aarch64__) || defined(__arm64__) +#include + + +#endif +#ifdef __i386__ + +#include + + +#endif +#ifdef __arm__ + +#include + + +#endif +#ifdef __x86_64__ + +#include +#endif diff --git a/module/src/main/cpp/whale/src/libffi/ffitarget.h b/module/src/main/cpp/whale/src/libffi/ffitarget.h new file mode 100644 index 00000000..af6be31d --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/ffitarget.h @@ -0,0 +1,24 @@ +#ifdef __aarch64__ + +#include + + +#endif +#ifdef __i386__ + +#include + + +#endif +#ifdef __arm__ + +#include + + +#endif +#ifdef __x86_64__ + +#include + + +#endif diff --git a/module/src/main/cpp/whale/src/libffi/java_raw_api.c b/module/src/main/cpp/whale/src/libffi/java_raw_api.c new file mode 100644 index 00000000..114d3e47 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/java_raw_api.c @@ -0,0 +1,374 @@ +/* ----------------------------------------------------------------------- + java_raw_api.c - Copyright (c) 1999, 2007, 2008 Red Hat, Inc. 
+ + Cloned from raw_api.c + + Raw_api.c author: Kresten Krab Thorup + Java_raw_api.c author: Hans-J. Boehm + + $Id $ + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This defines a Java- and 64-bit specific variant of the raw API. */ +/* It assumes that "raw" argument blocks look like Java stacks on a */ +/* 64-bit machine. Arguments that can be stored in a single stack */ +/* stack slots (longs, doubles) occupy 128 bits, but only the first */ +/* 64 bits are actually used. 
*/ + +#include +#include +#include + +#if !defined(NO_JAVA_RAW_API) + +size_t +ffi_java_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { + switch((*at) -> type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + result += 2 * FFI_SIZEOF_JAVA_RAW; + break; + case FFI_TYPE_STRUCT: + /* No structure parameters in Java. */ + abort(); + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + default: + result += FFI_SIZEOF_JAVA_RAW; + } + } + + return result; +} + + +void +ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + 3); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + 2); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void *)raw; + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + *args = raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if FFI_SIZEOF_JAVA_RAW == 8 + switch((*tp)->type) { + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + *args = (void*) raw; + raw += 2; + break; + case FFI_TYPE_COMPLEX: + /* Not supported yet. 
*/ + abort(); + default: + *args = (void*) raw++; + } +#else /* FFI_SIZEOF_JAVA_RAW != 8 */ + *args = (void*) raw; + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / sizeof(ffi_java_raw); +#endif /* FFI_SIZEOF_JAVA_RAW == 8 */ + } + +#else +#error "pdp endian not supported" +#endif /* ! PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT8*) (*args); +#else + (raw++)->uint = *(UINT8*) (*args); +#endif + break; + + case FFI_TYPE_SINT8: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT8*) (*args); +#else + (raw++)->sint = *(SINT8*) (*args); +#endif + break; + + case FFI_TYPE_UINT16: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT16*) (*args); +#else + (raw++)->uint = *(UINT16*) (*args); +#endif + break; + + case FFI_TYPE_SINT16: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT16*) (*args); +#else + (raw++)->sint = *(SINT16*) (*args); +#endif + break; + + case FFI_TYPE_UINT32: +#if WORDS_BIGENDIAN + *(UINT32*)(raw++) = *(UINT32*) (*args); +#else + (raw++)->uint = *(UINT32*) (*args); +#endif + break; + + case FFI_TYPE_SINT32: +#if WORDS_BIGENDIAN + *(SINT32*)(raw++) = *(SINT32*) (*args); +#else + (raw++)->sint = *(SINT32*) (*args); +#endif + break; + + case FFI_TYPE_FLOAT: + (raw++)->flt = *(FLOAT32*) (*args); + break; + +#if FFI_SIZEOF_JAVA_RAW == 8 + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_DOUBLE: + raw->uint = *(UINT64*) (*args); + raw += 2; + break; +#endif + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: +#if FFI_SIZEOF_JAVA_RAW == 8 + FFI_ASSERT(0); /* Should have covered all cases */ +#else + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += + FFI_ALIGN ((*tp)->size, sizeof(ffi_java_raw)) / 
sizeof(ffi_java_raw); +#endif + } + } +} + +#if !FFI_NATIVE_RAW_API + +static void +ffi_java_rvalue_to_raw (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: +#if FFI_SIZEOF_JAVA_RAW == 4 + case FFI_TYPE_POINTER: +#endif + *(SINT64 *)rvalue <<= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +static void +ffi_java_raw_to_rvalue (ffi_cif *cif, void *rvalue) +{ +#if WORDS_BIGENDIAN && FFI_SIZEOF_ARG == 8 + switch (cif->rtype->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_UINT32: + *(UINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_SINT8: + case FFI_TYPE_SINT16: + case FFI_TYPE_SINT32: + case FFI_TYPE_INT: + *(SINT64 *)rvalue >>= 32; + break; + + case FFI_TYPE_COMPLEX: + /* Not supported yet. */ + abort(); + + default: + break; + } +#endif +} + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
*/ + +void ffi_java_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, + ffi_java_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_java_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); + ffi_java_rvalue_to_raw (cif, rvalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_java_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_java_raw *raw = (ffi_java_raw*)alloca (ffi_java_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_java_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, (ffi_raw*)raw, cl->user_data); + ffi_java_raw_to_rvalue (cif, rvalue); +} + +ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_java_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data) +{ + return ffi_prep_java_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ +#endif /* !NO_JAVA_RAW_API */ diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffi_arm64.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_arm64.h new file mode 100644 index 00000000..7fe43752 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_arm64.h @@ -0,0 +1,516 @@ +#if defined(__aarch64__) || 
defined(__arm64__) + +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. 
+ + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef AARCH64 +#define AARCH64 +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. */ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. 
Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. */ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define 
ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. */ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if 1 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and 
FFI_SIZEOF_ARG is 8. */ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if FFI_EXEC_TRAMPOLINE_TABLE + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + 
__attribute__((deprecated)) +#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if FFI_EXEC_TRAMPOLINE_TABLE + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if FFI_EXEC_TRAMPOLINE_TABLE + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. 
*/ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if 1 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffi_armv7.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_armv7.h new file mode 100644 index 00000000..330a5b43 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_armv7.h @@ -0,0 +1,516 @@ +#ifdef __arm__ + +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef ARM +#define ARM +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. 
*/ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. 
*/ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if 1 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if FFI_EXEC_TRAMPOLINE_TABLE + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) 
+#endif + ; + +FFI_API ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if FFI_EXEC_TRAMPOLINE_TABLE + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if FFI_EXEC_TRAMPOLINE_TABLE + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. 
*/ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if 1 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffi_i386.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_i386.h new file mode 100644 index 00000000..61e97c10 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_i386.h @@ -0,0 +1,516 @@ +#ifdef __i386__ + +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef X86_UNIX +#define X86_UNIX +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. 
*/ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. 
*/ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if 1 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API 
ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the transaltion, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. 
*/ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if 1 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffi_x86_64.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_x86_64.h new file mode 100644 index 00000000..78e7f418 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffi_x86_64.h @@ -0,0 +1,516 @@ +#ifdef __x86_64__ + +/* -----------------------------------------------------------------*-C-*- + libffi 3.3-rc0 - Copyright (c) 2011, 2014 Anthony Green + - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the ``Software''), to deal in the Software without + restriction, including without limitation the rights to use, copy, + modify, merge, publish, distribute, sublicense, and/or sell copies + of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------- + Most of the API is documented in doc/libffi.texi. + + The raw API is designed to bypass some of the argument packing and + unpacking on architectures for which it can be avoided. Routines + are provided to emulate the raw API if the underlying platform + doesn't allow faster implementation. + + More details on the raw API can be found in: + + http://gcc.gnu.org/ml/java/1999-q3/msg00138.html + + and + + http://gcc.gnu.org/ml/java/1999-q3/msg00174.html + -------------------------------------------------------------------- */ + +#ifndef LIBFFI_H +#define LIBFFI_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* Specify which architecture libffi is configured for. */ +#ifndef X86_64 +#define X86_64 +#endif + +/* ---- System configuration information --------------------------------- */ + +#include + +#ifndef LIBFFI_ASM + +#if defined(_MSC_VER) && !defined(__clang__) +#define __attribute__(X) +#endif + +#include +#include + +/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example). + But we can find it either under the correct ANSI name, or under GNU + C's internal name. 
*/ + +#define FFI_64_BIT_MAX 9223372036854775807 + +#ifdef LONG_LONG_MAX +# define FFI_LONG_LONG_MAX LONG_LONG_MAX +#else +# ifdef LLONG_MAX +# define FFI_LONG_LONG_MAX LLONG_MAX +# ifdef _AIX52 /* or newer has C99 LLONG_MAX */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif /* _AIX52 or newer */ +# else +# ifdef __GNUC__ +# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__ +# endif +# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */ +# ifndef __PPC64__ +# if defined (__IBMC__) || defined (__IBMCPP__) +# define FFI_LONG_LONG_MAX LONGLONG_MAX +# endif +# endif /* __PPC64__ */ +# undef FFI_64_BIT_MAX +# define FFI_64_BIT_MAX 9223372036854775807LL +# endif +# endif +#endif + +/* The closure code assumes that this works on pointers, i.e. a size_t + can hold a pointer. */ + +typedef struct _ffi_type +{ + size_t size; + unsigned short alignment; + unsigned short type; + struct _ffi_type **elements; +} ffi_type; + +/* Need minimal decorations for DLLs to work on Windows. GCC has + autoimport and autoexport. Always mark externally visible symbols + as dllimport for MSVC clients, even if it means an extra indirection + when using the static version of the library. + Besides, as a workaround, they can define FFI_BUILDING if they + *know* they are going to link with the static library. */ +#if defined _MSC_VER +# if defined FFI_BUILDING_DLL /* Building libffi.DLL with msvcc.sh */ +# define FFI_API __declspec(dllexport) +# elif !defined FFI_BUILDING /* Importing libffi.DLL */ +# define FFI_API __declspec(dllimport) +# else /* Building/linking static library */ +# define FFI_API +# endif +#else +# define FFI_API +#endif + +/* The externally visible type declarations also need the MSVC DLL + decorations, or they will not be exported from the object file. 
*/ +#if defined LIBFFI_HIDE_BASIC_TYPES +# define FFI_EXTERN FFI_API +#else +# define FFI_EXTERN extern FFI_API +#endif + +#ifndef LIBFFI_HIDE_BASIC_TYPES +#if SCHAR_MAX == 127 +# define ffi_type_uchar ffi_type_uint8 +# define ffi_type_schar ffi_type_sint8 +#else + #error "char size not supported" +#endif + +#if SHRT_MAX == 32767 +# define ffi_type_ushort ffi_type_uint16 +# define ffi_type_sshort ffi_type_sint16 +#elif SHRT_MAX == 2147483647 +# define ffi_type_ushort ffi_type_uint32 +# define ffi_type_sshort ffi_type_sint32 +#else + #error "short size not supported" +#endif + +#if INT_MAX == 32767 +# define ffi_type_uint ffi_type_uint16 +# define ffi_type_sint ffi_type_sint16 +#elif INT_MAX == 2147483647 +# define ffi_type_uint ffi_type_uint32 +# define ffi_type_sint ffi_type_sint32 +#elif INT_MAX == 9223372036854775807 +# define ffi_type_uint ffi_type_uint64 +# define ffi_type_sint ffi_type_sint64 +#else + #error "int size not supported" +#endif + +#if LONG_MAX == 2147483647 +# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX + #error "no 64-bit data type supported" +# endif +#elif LONG_MAX != FFI_64_BIT_MAX + #error "long size not supported" +#endif + +#if LONG_MAX == 2147483647 +# define ffi_type_ulong ffi_type_uint32 +# define ffi_type_slong ffi_type_sint32 +#elif LONG_MAX == FFI_64_BIT_MAX +# define ffi_type_ulong ffi_type_uint64 +# define ffi_type_slong ffi_type_sint64 +#else + #error "long size not supported" +#endif + +/* These are defined in types.c. 
*/ +FFI_EXTERN ffi_type ffi_type_void; +FFI_EXTERN ffi_type ffi_type_uint8; +FFI_EXTERN ffi_type ffi_type_sint8; +FFI_EXTERN ffi_type ffi_type_uint16; +FFI_EXTERN ffi_type ffi_type_sint16; +FFI_EXTERN ffi_type ffi_type_uint32; +FFI_EXTERN ffi_type ffi_type_sint32; +FFI_EXTERN ffi_type ffi_type_uint64; +FFI_EXTERN ffi_type ffi_type_sint64; +FFI_EXTERN ffi_type ffi_type_float; +FFI_EXTERN ffi_type ffi_type_double; +FFI_EXTERN ffi_type ffi_type_pointer; + +#if 1 +FFI_EXTERN ffi_type ffi_type_longdouble; +#else +#define ffi_type_longdouble ffi_type_double +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_EXTERN ffi_type ffi_type_complex_float; +FFI_EXTERN ffi_type ffi_type_complex_double; +#if 1 +FFI_EXTERN ffi_type ffi_type_complex_longdouble; +#else +#define ffi_type_complex_longdouble ffi_type_complex_double +#endif +#endif +#endif /* LIBFFI_HIDE_BASIC_TYPES */ + +typedef enum { + FFI_OK = 0, + FFI_BAD_TYPEDEF, + FFI_BAD_ABI +} ffi_status; + +typedef struct { + ffi_abi abi; + unsigned nargs; + ffi_type **arg_types; + ffi_type *rtype; + unsigned bytes; + unsigned flags; +#ifdef FFI_EXTRA_CIF_FIELDS + FFI_EXTRA_CIF_FIELDS; +#endif +} ffi_cif; + +/* ---- Definitions for the raw API -------------------------------------- */ + +#ifndef FFI_SIZEOF_ARG +# if LONG_MAX == 2147483647 +# define FFI_SIZEOF_ARG 4 +# elif LONG_MAX == FFI_64_BIT_MAX +# define FFI_SIZEOF_ARG 8 +# endif +#endif + +#ifndef FFI_SIZEOF_JAVA_RAW +# define FFI_SIZEOF_JAVA_RAW FFI_SIZEOF_ARG +#endif + +typedef union { + ffi_sarg sint; + ffi_arg uint; + float flt; + char data[FFI_SIZEOF_ARG]; + void* ptr; +} ffi_raw; + +#if FFI_SIZEOF_JAVA_RAW == 4 && FFI_SIZEOF_ARG == 8 +/* This is a special case for mips64/n32 ABI (and perhaps others) where + sizeof(void *) is 4 and FFI_SIZEOF_ARG is 8. 
*/ +typedef union { + signed int sint; + unsigned int uint; + float flt; + char data[FFI_SIZEOF_JAVA_RAW]; + void* ptr; +} ffi_java_raw; +#else +typedef ffi_raw ffi_java_raw; +#endif + + +FFI_API +void ffi_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_raw *avalue); + +FFI_API void ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw); +FFI_API void ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args); +FFI_API size_t ffi_raw_size (ffi_cif *cif); + +/* This is analogous to the raw API, except it uses Java parameter + packing, even on 64-bit machines. I.e. on 64-bit machines longs + and doubles are followed by an empty 64-bit word. */ + +FFI_API +void ffi_java_raw_call (ffi_cif *cif, + void (*fn)(void), + void *rvalue, + ffi_java_raw *avalue); + +FFI_API +void ffi_java_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_java_raw *raw); +FFI_API +void ffi_java_raw_to_ptrarray (ffi_cif *cif, ffi_java_raw *raw, void **args); +FFI_API +size_t ffi_java_raw_size (ffi_cif *cif); + +/* ---- Definitions for closures ----------------------------------------- */ + +#if FFI_CLOSURES + +#ifdef _MSC_VER +__declspec(align(8)) +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); + void *user_data; +} ffi_closure +#ifdef __GNUC__ + __attribute__((aligned (8))) +#endif + ; + +#ifndef __GNUC__ +# ifdef __sgi +# pragma pack 0 +# endif +#endif + +FFI_API void *ffi_closure_alloc (size_t size, void **code); +FFI_API void ffi_closure_free (void *); + +FFI_API ffi_status +ffi_prep_closure (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 405) + __attribute__((deprecated ("use ffi_prep_closure_loc instead"))) +#elif defined(__GNUC__) && __GNUC__ >= 3 + __attribute__((deprecated)) +#endif + ; + +FFI_API 
ffi_status +ffi_prep_closure_loc (ffi_closure*, + ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void*codeloc); + +#ifdef __sgi +# pragma pack 8 +#endif +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. */ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_raw*,void*); + void *user_data; + +} ffi_raw_closure; + +typedef struct { +#if 0 + void *trampoline_table; + void *trampoline_table_entry; +#else + char tramp[FFI_TRAMPOLINE_SIZE]; +#endif + + ffi_cif *cif; + +#if !FFI_NATIVE_RAW_API + + /* If this is enabled, then a raw closure has the same layout + as a regular closure. We use this to install an intermediate + handler to do the translation, void** -> ffi_raw*. 
*/ + + void (*translate_args)(ffi_cif*,void*,void**,void*); + void *this_closure; + +#endif + + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*); + void *user_data; + +} ffi_java_raw_closure; + +FFI_API ffi_status +ffi_prep_raw_closure (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc); + +FFI_API ffi_status +ffi_prep_java_raw_closure (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data); + +FFI_API ffi_status +ffi_prep_java_raw_closure_loc (ffi_java_raw_closure*, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_java_raw*,void*), + void *user_data, + void *codeloc); + +#endif /* FFI_CLOSURES */ + +#if FFI_GO_CLOSURES + +typedef struct { + void *tramp; + ffi_cif *cif; + void (*fun)(ffi_cif*,void*,void**,void*); +} ffi_go_closure; + +FFI_API ffi_status ffi_prep_go_closure (ffi_go_closure*, ffi_cif *, + void (*fun)(ffi_cif*,void*,void**,void*)); + +FFI_API void ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); + +#endif /* FFI_GO_CLOSURES */ + +/* ---- Public interface definition -------------------------------------- */ + +FFI_API +ffi_status ffi_prep_cif(ffi_cif *cif, + ffi_abi abi, + unsigned int nargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes); + +FFI_API +void ffi_call(ffi_cif *cif, + void (*fn)(void), + void *rvalue, + void **avalue); + +FFI_API +ffi_status ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, + size_t *offsets); + +/* Useful for eliminating compiler warnings. 
*/ +#define FFI_FN(f) ((void (*)(void))f) + +/* ---- Definitions shared with assembly code ---------------------------- */ + +#endif + +/* If these change, update src/mips/ffitarget.h. */ +#define FFI_TYPE_VOID 0 +#define FFI_TYPE_INT 1 +#define FFI_TYPE_FLOAT 2 +#define FFI_TYPE_DOUBLE 3 +#if 1 +#define FFI_TYPE_LONGDOUBLE 4 +#else +#define FFI_TYPE_LONGDOUBLE FFI_TYPE_DOUBLE +#endif +#define FFI_TYPE_UINT8 5 +#define FFI_TYPE_SINT8 6 +#define FFI_TYPE_UINT16 7 +#define FFI_TYPE_SINT16 8 +#define FFI_TYPE_UINT32 9 +#define FFI_TYPE_SINT32 10 +#define FFI_TYPE_UINT64 11 +#define FFI_TYPE_SINT64 12 +#define FFI_TYPE_STRUCT 13 +#define FFI_TYPE_POINTER 14 +#define FFI_TYPE_COMPLEX 15 + +/* This should always refer to the last type code (for sanity checks). */ +#define FFI_TYPE_LAST FFI_TYPE_COMPLEX + +#ifdef __cplusplus +} +#endif + +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_arm64.h b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_arm64.h new file mode 100644 index 00000000..1d94fe3c --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_arm64.h @@ -0,0 +1,224 @@ +#if defined(__aarch64__) || defined(__arm64__) + +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "aw" + +/* Define this if you want extra debugging. 
*/ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +#ifdef __APPLE__ +#define FFI_EXEC_TRAMPOLINE_TABLE 1 +#endif + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +#ifdef linux +#define FFI_MMAP_EXEC_WRIT 1 +#endif + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. */ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define if your assembler supports .cfi_* directives. */ +#define HAVE_AS_CFI_PSEUDO_OP 1 + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. */ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +/* #undef HAVE_AS_X86_PCREL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +/* #undef HAVE_LONG_DOUBLE */ + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. 
*/ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +/* #undef HAVE_MKOSTEMP */ + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +/* #undef HAVE_MMAP_DEV_ZERO */ + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if .eh_frame sections should be read-only. */ +/* #undef HAVE_RO_EH_FRAME */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +/* #undef LIBFFI_GNU_SYMBOL_VERSIONING */ + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3-rc0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. 
*/ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3-rc0" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 8 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +#define SYMBOL_UNDERSCORE 1 + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3-rc0" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if does not define. 
*/ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_armv7.h b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_armv7.h new file mode 100644 index 00000000..64c30a50 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_armv7.h @@ -0,0 +1,224 @@ +#ifdef __arm__ + +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "aw" + +/* Define this if you want extra debugging. */ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +#ifdef __APPLE__ +#define FFI_EXEC_TRAMPOLINE_TABLE 1 +#endif + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +#ifdef linux +#define FFI_MMAP_EXEC_WRIT 1 +#endif + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. 
*/ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define if your assembler supports .cfi_* directives. */ +#define HAVE_AS_CFI_PSEUDO_OP 1 + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. */ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +/* #undef HAVE_AS_X86_PCREL */ + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +/* #undef HAVE_LONG_DOUBLE */ + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +/* #undef HAVE_MKOSTEMP */ + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +/* #undef HAVE_MMAP_DEV_ZERO */ + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if .eh_frame sections should be read-only. 
*/ +/* #undef HAVE_RO_EH_FRAME */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +/* #undef LIBFFI_GNU_SYMBOL_VERSIONING */ + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3-rc0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3-rc0" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 8 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 4 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. 
+ STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +#define SYMBOL_UNDERSCORE 1 + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3-rc0" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_i386.h b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_i386.h new file mode 100644 index 00000000..43415fb3 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_i386.h @@ -0,0 +1,220 @@ +#ifdef __i386__ + +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. 
This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "aw" + +/* Define this if you want extra debugging. */ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +/* #undef FFI_EXEC_TRAMPOLINE_TABLE */ + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +#define FFI_MMAP_EXEC_WRIT 1 + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. */ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define if your assembler supports .cfi_* directives. */ +#define HAVE_AS_CFI_PSEUDO_OP 1 + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. */ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +#define HAVE_AS_X86_PCREL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +#define HAVE_LONG_DOUBLE 1 + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +/* #undef HAVE_MKOSTEMP */ + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +/* #undef HAVE_MMAP_DEV_ZERO */ + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if .eh_frame sections should be read-only. */ +/* #undef HAVE_RO_EH_FRAME */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +/* #undef LIBFFI_GNU_SYMBOL_VERSIONING */ + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. 
*/ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3-rc0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3-rc0" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 16 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 4 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +#define SYMBOL_UNDERSCORE 1 + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3-rc0" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if does not define. 
*/ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_x86_64.h b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_x86_64.h new file mode 100644 index 00000000..c5f275db --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/fficonfig_x86_64.h @@ -0,0 +1,220 @@ +#ifdef __x86_64__ + +/* fficonfig.h. Generated from fficonfig.h.in by configure. */ +/* fficonfig.h.in. Generated from configure.ac by autoheader. */ + +/* Define if building universal (internal helper macro) */ +/* #undef AC_APPLE_UNIVERSAL_BUILD */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Define to the flags needed for the .section .eh_frame directive. */ +#define EH_FRAME_FLAGS "aw" + +/* Define this if you want extra debugging. */ +/* #undef FFI_DEBUG */ + +/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */ +/* #undef FFI_EXEC_TRAMPOLINE_TABLE */ + +/* Define this if you want to enable pax emulated trampolines */ +/* #undef FFI_MMAP_EXEC_EMUTRAMP_PAX */ + +/* Cannot use malloc on this target, so, we revert to alternative means */ +#define FFI_MMAP_EXEC_WRIT 1 + +/* Define this if you do not want support for the raw API. */ +/* #undef FFI_NO_RAW_API */ + +/* Define this if you do not want support for aggregate types. 
*/ +/* #undef FFI_NO_STRUCTS */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define if your assembler supports .cfi_* directives. */ +#define HAVE_AS_CFI_PSEUDO_OP 1 + +/* Define if your assembler supports .register. */ +/* #undef HAVE_AS_REGISTER_PSEUDO_OP */ + +/* Define if the compiler uses zarch features. */ +/* #undef HAVE_AS_S390_ZARCH */ + +/* Define if your assembler and linker support unaligned PC relative relocs. + */ +/* #undef HAVE_AS_SPARC_UA_PCREL */ + +/* Define if your assembler supports unwind section type. */ +/* #undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE */ + +/* Define if your assembler supports PC relative relocs. */ +#define HAVE_AS_X86_PCREL 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define if __attribute__((visibility("hidden"))) is supported. */ +#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the long double type and it is bigger than a double */ +#define HAVE_LONG_DOUBLE 1 + +/* Define if you support more than one size of the long double type */ +/* #undef HAVE_LONG_DOUBLE_VARIANT */ + +/* Define to 1 if you have the `memcpy' function. */ +#define HAVE_MEMCPY 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `mkostemp' function. */ +/* #undef HAVE_MKOSTEMP */ + +/* Define to 1 if you have the `mmap' function. */ +#define HAVE_MMAP 1 + +/* Define if mmap with MAP_ANON(YMOUS) works. */ +#define HAVE_MMAP_ANON 1 + +/* Define if mmap of /dev/zero works. */ +/* #undef HAVE_MMAP_DEV_ZERO */ + +/* Define if read-only mmap of a plain file works. */ +#define HAVE_MMAP_FILE 1 + +/* Define if .eh_frame sections should be read-only. 
*/ +/* #undef HAVE_RO_EH_FRAME */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_MMAN_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if GNU symbol versioning is used for libatomic. */ +/* #undef LIBFFI_GNU_SYMBOL_VERSIONING */ + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libffi" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://github.com/libffi/libffi/issues" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libffi" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libffi 3.3-rc0" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libffi" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "3.3-rc0" + +/* The size of `double', as computed by sizeof. */ +#define SIZEOF_DOUBLE 8 + +/* The size of `long double', as computed by sizeof. */ +#define SIZEOF_LONG_DOUBLE 16 + +/* The size of `size_t', as computed by sizeof. */ +#define SIZEOF_SIZE_T 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. 
+ STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define if symbols are underscored. */ +#define SYMBOL_UNDERSCORE 1 + +/* Define this if you are using Purify and want to suppress spurious messages. + */ +/* #undef USING_PURIFY */ + +/* Version number of package */ +#define VERSION "3.3-rc0" + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). */ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +/* # undef WORDS_BIGENDIAN */ +# endif +#endif + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + + +#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +#ifdef LIBFFI_ASM +#ifdef __APPLE__ +#define FFI_HIDDEN(name) .private_extern name +#else +#define FFI_HIDDEN(name) .hidden name +#endif +#else +#define FFI_HIDDEN __attribute__ ((visibility ("hidden"))) +#endif +#else +#ifdef LIBFFI_ASM +#define FFI_HIDDEN(name) +#else +#define FFI_HIDDEN +#endif +#endif + + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_arm64.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_arm64.h new file mode 100644 index 00000000..f085a4b2 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_arm64.h @@ -0,0 +1,86 @@ +#ifdef __aarch64__ + +/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +#ifdef __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef signed long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif + +typedef enum ffi_abi + { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV + } ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 16 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 16 +#else +#error "No trampoline table implementation" +#endif + +#else +#define FFI_TRAMPOLINE_SIZE 24 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +/* ---- Internal ---- */ + +#if defined (__APPLE__) +#define FFI_TARGET_SPECIFIC_VARIADIC +#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_nfixedargs +#else +/* iOS reserves x18 for the system. Disable Go closures until + a new static chain is chosen. */ +#define FFI_GO_CLOSURES 1 +#endif + +#define FFI_TARGET_HAS_COMPLEX_TYPE + +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_armv7.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_armv7.h new file mode 100644 index 00000000..f0e4455a --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_armv7.h @@ -0,0 +1,87 @@ +#ifdef __arm__ + +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012 Anthony Green + Copyright (c) 2010 CodeSourcery + Copyright (c) 1996-2003 Red Hat, Inc. + + Target configuration macros for ARM. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
+#endif + +#ifndef LIBFFI_ASM +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; + +typedef enum ffi_abi { + FFI_FIRST_ABI = 0, + FFI_SYSV, + FFI_VFP, + FFI_LAST_ABI, +#ifdef __ARM_PCS_VFP + FFI_DEFAULT_ABI = FFI_VFP, +#else + FFI_DEFAULT_ABI = FFI_SYSV, +#endif +} ffi_abi; +#endif + +#define FFI_EXTRA_CIF_FIELDS \ + int vfp_used; \ + unsigned short vfp_reg_free, vfp_nargs; \ + signed char vfp_args[16] \ + +#define FFI_TARGET_SPECIFIC_VARIADIC +#define FFI_TARGET_HAS_COMPLEX_TYPE + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 +#define FFI_NATIVE_RAW_API 0 + +#if defined (FFI_EXEC_TRAMPOLINE_TABLE) && FFI_EXEC_TRAMPOLINE_TABLE + +#ifdef __MACH__ +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET 8 +#else +#error "No trampoline table implementation" +#endif + +#else +#define FFI_TRAMPOLINE_SIZE 12 +#define FFI_TRAMPOLINE_CLOSURE_OFFSET FFI_TRAMPOLINE_SIZE +#endif + +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_i386.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_i386.h new file mode 100644 index 00000000..488b8d0e --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_i386.h @@ -0,0 +1,152 @@ +#ifdef __i386__ + +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2014, 2018 Anthony Green + Copyright (c) 1996-2003, 2010 Red Hat, Inc. + Copyright (C) 2008 Free Software Foundation, Inc. + + Target configuration macros for x86 and x86-64. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +/* For code common to all platforms on x86 and x86_64. 
*/ +#define X86_ANY + +#if defined (X86_64) && defined (__i386__) +#undef X86_64 +#define X86 +#endif + +#ifdef X86_WIN64 +#define FFI_SIZEOF_ARG 8 +#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */ +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#ifndef _MSC_VER +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +#ifdef X86_WIN64 +#ifdef _MSC_VER +typedef unsigned __int64 ffi_arg; +typedef __int64 ffi_sarg; +#else +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#endif +#else +#if defined __x86_64__ && defined __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif +#endif + +typedef enum ffi_abi { +#if defined(X86_WIN64) + FFI_FIRST_ABI = 0, + FFI_WIN64, /* sizeof(long double) == 8 - microsoft compilers */ + FFI_GNUW64, /* sizeof(long double) == 16 - GNU compilers */ + FFI_LAST_ABI, +#ifdef __GNUC__ + FFI_DEFAULT_ABI = FFI_GNUW64 +#else + FFI_DEFAULT_ABI = FFI_WIN64 +#endif + +#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN)) + FFI_FIRST_ABI = 1, + FFI_UNIX64, + FFI_WIN64, + FFI_EFI64 = FFI_WIN64, + FFI_GNUW64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX64 + +#elif defined(X86_WIN32) + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_STDCALL = 2, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_MS_CDECL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_MS_CDECL +#else + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_STDCALL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_MS_CDECL = 8, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +#endif +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 + 
+#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1) +#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2) +#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3) +#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4) + +#if defined (X86_64) || defined(X86_WIN64) \ + || (defined (__x86_64__) && defined (X86_DARWIN)) +# define FFI_TRAMPOLINE_SIZE 24 +# define FFI_NATIVE_RAW_API 0 +#else +# define FFI_TRAMPOLINE_SIZE 12 +# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */ +#endif + +#endif + + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_x86_64.h b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_x86_64.h new file mode 100644 index 00000000..e749adeb --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/platform_include/ffitarget_x86_64.h @@ -0,0 +1,152 @@ +#ifdef __x86_64__ + +/* -----------------------------------------------------------------*-C-*- + ffitarget.h - Copyright (c) 2012, 2014, 2018 Anthony Green + Copyright (c) 1996-2003, 2010 Red Hat, Inc. + Copyright (C) 2008 Free Software Foundation, Inc. + + Target configuration macros for x86 and x86-64. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + ----------------------------------------------------------------------- */ + +#ifndef LIBFFI_TARGET_H +#define LIBFFI_TARGET_H + +#ifndef LIBFFI_H +#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." +#endif + +/* ---- System specific configurations ----------------------------------- */ + +/* For code common to all platforms on x86 and x86_64. */ +#define X86_ANY + +#if defined (X86_64) && defined (__i386__) +#undef X86_64 +#define X86 +#endif + +#ifdef X86_WIN64 +#define FFI_SIZEOF_ARG 8 +#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */ +#endif + +#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION +#ifndef _MSC_VER +#define FFI_TARGET_HAS_COMPLEX_TYPE +#endif + +/* ---- Generic type definitions ----------------------------------------- */ + +#ifndef LIBFFI_ASM +#ifdef X86_WIN64 +#ifdef _MSC_VER +typedef unsigned __int64 ffi_arg; +typedef __int64 ffi_sarg; +#else +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#endif +#else +#if defined __x86_64__ && defined __ILP32__ +#define FFI_SIZEOF_ARG 8 +#define FFI_SIZEOF_JAVA_RAW 4 +typedef unsigned long long ffi_arg; +typedef long long ffi_sarg; +#else +typedef unsigned long ffi_arg; +typedef signed long ffi_sarg; +#endif +#endif + +typedef enum ffi_abi { +#if defined(X86_WIN64) + FFI_FIRST_ABI = 0, + FFI_WIN64, /* sizeof(long double) == 8 - microsoft compilers */ + FFI_GNUW64, /* sizeof(long double) == 16 - GNU compilers */ + FFI_LAST_ABI, +#ifdef __GNUC__ + FFI_DEFAULT_ABI = FFI_GNUW64 +#else + FFI_DEFAULT_ABI = FFI_WIN64 +#endif + +#elif defined(X86_64) || (defined (__x86_64__) && defined (X86_DARWIN)) + FFI_FIRST_ABI = 1, + FFI_UNIX64, + FFI_WIN64, + FFI_EFI64 = FFI_WIN64, + 
FFI_GNUW64, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_UNIX64 + +#elif defined(X86_WIN32) + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_STDCALL = 2, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_MS_CDECL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_MS_CDECL +#else + FFI_FIRST_ABI = 0, + FFI_SYSV = 1, + FFI_THISCALL = 3, + FFI_FASTCALL = 4, + FFI_STDCALL = 5, + FFI_PASCAL = 6, + FFI_REGISTER = 7, + FFI_MS_CDECL = 8, + FFI_LAST_ABI, + FFI_DEFAULT_ABI = FFI_SYSV +#endif +} ffi_abi; +#endif + +/* ---- Definitions for closures ----------------------------------------- */ + +#define FFI_CLOSURES 1 +#define FFI_GO_CLOSURES 1 + +#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1) +#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2) +#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3) +#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4) + +#if defined (X86_64) || defined(X86_WIN64) \ + || (defined (__x86_64__) && defined (X86_DARWIN)) +# define FFI_TRAMPOLINE_SIZE 24 +# define FFI_NATIVE_RAW_API 0 +#else +# define FFI_TRAMPOLINE_SIZE 12 +# define FFI_NATIVE_RAW_API 1 /* x86 has native raw api support */ +#endif + +#endif + + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/prep_cif.c b/module/src/main/cpp/whale/src/libffi/prep_cif.c new file mode 100644 index 00000000..fe054536 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/prep_cif.c @@ -0,0 +1,261 @@ +/* ----------------------------------------------------------------------- + prep_cif.c - Copyright (c) 2011, 2012 Anthony Green + Copyright (c) 1996, 1998, 2007 Red Hat, Inc. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include +#include + +/* Round up to FFI_SIZEOF_ARG. */ + +#define STACK_ARG_SIZE(x) FFI_ALIGN(x, FFI_SIZEOF_ARG) + +/* Perform machine independent initialization of aggregate type + specifications. 
*/ + +static ffi_status initialize_aggregate(ffi_type *arg, size_t *offsets) +{ + ffi_type **ptr; + + if (UNLIKELY(arg == NULL || arg->elements == NULL)) + return FFI_BAD_TYPEDEF; + + arg->size = 0; + arg->alignment = 0; + + ptr = &(arg->elements[0]); + + if (UNLIKELY(ptr == 0)) + return FFI_BAD_TYPEDEF; + + while ((*ptr) != NULL) + { + if (UNLIKELY(((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK))) + return FFI_BAD_TYPEDEF; + + /* Perform a sanity check on the argument type */ + FFI_ASSERT_VALID_TYPE(*ptr); + + arg->size = FFI_ALIGN(arg->size, (*ptr)->alignment); + if (offsets) + *offsets++ = arg->size; + arg->size += (*ptr)->size; + + arg->alignment = (arg->alignment > (*ptr)->alignment) ? + arg->alignment : (*ptr)->alignment; + + ptr++; + } + + /* Structure size includes tail padding. This is important for + structures that fit in one register on ABIs like the PowerPC64 + Linux ABI that right justify small structs in a register. + It's also needed for nested structure layout, for example + struct A { long a; char b; }; struct B { struct A x; char y; }; + should find y at an offset of 2*sizeof(long) and result in a + total size of 3*sizeof(long). */ + arg->size = FFI_ALIGN (arg->size, arg->alignment); + + /* On some targets, the ABI defines that structures have an additional + alignment beyond the "natural" one based on their elements. */ +#ifdef FFI_AGGREGATE_ALIGNMENT + if (FFI_AGGREGATE_ALIGNMENT > arg->alignment) + arg->alignment = FFI_AGGREGATE_ALIGNMENT; +#endif + + if (arg->size == 0) + return FFI_BAD_TYPEDEF; + else + return FFI_OK; +} + +#ifndef __CRIS__ +/* The CRIS ABI specifies structure elements to have byte + alignment only, so it completely overrides this functions, + which assumes "natural" alignment and padding. */ + +/* Perform machine independent ffi_cif preparation, then call + machine dependent routine. */ + +/* For non variadic functions isvariadic should be 0 and + nfixedargs==ntotalargs. 
+ + For variadic calls, isvariadic should be 1 and nfixedargs + and ntotalargs set as appropriate. nfixedargs must always be >=1 */ + + +ffi_status FFI_HIDDEN ffi_prep_cif_core(ffi_cif *cif, ffi_abi abi, + unsigned int isvariadic, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, ffi_type **atypes) +{ + unsigned bytes = 0; + unsigned int i; + ffi_type **ptr; + + FFI_ASSERT(cif != NULL); + FFI_ASSERT((!isvariadic) || (nfixedargs >= 1)); + FFI_ASSERT(nfixedargs <= ntotalargs); + + if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + + cif->abi = abi; + cif->arg_types = atypes; + cif->nargs = ntotalargs; + cif->rtype = rtype; + + cif->flags = 0; + +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + /* Initialize the return type if necessary */ + if ((cif->rtype->size == 0) + && (initialize_aggregate(cif->rtype, NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if (rtype->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the return type */ + FFI_ASSERT_VALID_TYPE(cif->rtype); + + /* x86, x86-64 and s390 stack space allocation is handled in prep_machdep. */ +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + /* Make space for the return structure pointer */ + if (cif->rtype->type == FFI_TYPE_STRUCT +#ifdef TILE + && (cif->rtype->size > 10 * FFI_SIZEOF_ARG) +#endif +#ifdef XTENSA + && (cif->rtype->size > 16) +#endif +#ifdef NIOS2 + && (cif->rtype->size > 8) +#endif + ) + bytes = STACK_ARG_SIZE(sizeof(void*)); +#endif + + for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) + { + + /* Initialize any uninitialized aggregate type definitions */ + if (((*ptr)->size == 0) + && (initialize_aggregate((*ptr), NULL) != FFI_OK)) + return FFI_BAD_TYPEDEF; + +#ifndef FFI_TARGET_HAS_COMPLEX_TYPE + if ((*ptr)->type == FFI_TYPE_COMPLEX) + abort(); +#endif + /* Perform a sanity check on the argument type, do this + check after the initialization. 
*/ + FFI_ASSERT_VALID_TYPE(*ptr); + +#if !defined FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION + { + /* Add any padding if necessary */ + if (((*ptr)->alignment - 1) & bytes) + bytes = (unsigned)FFI_ALIGN(bytes, (*ptr)->alignment); + +#ifdef TILE + if (bytes < 10 * FFI_SIZEOF_ARG && + bytes + STACK_ARG_SIZE((*ptr)->size) > 10 * FFI_SIZEOF_ARG) + { + /* An argument is never split between the 10 parameter + registers and the stack. */ + bytes = 10 * FFI_SIZEOF_ARG; + } +#endif +#ifdef XTENSA + if (bytes <= 6*4 && bytes + STACK_ARG_SIZE((*ptr)->size) > 6*4) + bytes = 6*4; +#endif + + bytes += STACK_ARG_SIZE((*ptr)->size); + } +#endif + } + + cif->bytes = bytes; + + /* Perform machine dependent cif processing */ +#ifdef FFI_TARGET_SPECIFIC_VARIADIC + if (isvariadic) + return ffi_prep_cif_machdep_var(cif, nfixedargs, ntotalargs); +#endif + + return ffi_prep_cif_machdep(cif); +} +#endif /* not __CRIS__ */ + +ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs, + ffi_type *rtype, ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 0, nargs, nargs, rtype, atypes); +} + +ffi_status ffi_prep_cif_var(ffi_cif *cif, + ffi_abi abi, + unsigned int nfixedargs, + unsigned int ntotalargs, + ffi_type *rtype, + ffi_type **atypes) +{ + return ffi_prep_cif_core(cif, abi, 1, nfixedargs, ntotalargs, rtype, atypes); +} + +#if FFI_CLOSURES + +ffi_status +ffi_prep_closure (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data) +{ + return ffi_prep_closure_loc (closure, cif, fun, user_data, closure); +} + +#endif + +ffi_status +ffi_get_struct_offsets (ffi_abi abi, ffi_type *struct_type, size_t *offsets) +{ + if (! 
(abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)) + return FFI_BAD_ABI; + if (struct_type->type != FFI_TYPE_STRUCT) + return FFI_BAD_TYPEDEF; + +#if HAVE_LONG_DOUBLE_VARIANT + ffi_prep_types (abi); +#endif + + return initialize_aggregate(struct_type, offsets); +} diff --git a/module/src/main/cpp/whale/src/libffi/raw_api.c b/module/src/main/cpp/whale/src/libffi/raw_api.c new file mode 100644 index 00000000..be156116 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/raw_api.c @@ -0,0 +1,267 @@ +/* ----------------------------------------------------------------------- + raw_api.c - Copyright (c) 1999, 2008 Red Hat, Inc. + + Author: Kresten Krab Thorup + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* This file defines generic functions for use with the raw api. 
*/ + +#include +#include + +#if !FFI_NO_RAW_API + +size_t +ffi_raw_size (ffi_cif *cif) +{ + size_t result = 0; + int i; + + ffi_type **at = cif->arg_types; + + for (i = cif->nargs-1; i >= 0; i--, at++) + { +#if !FFI_NO_STRUCTS + if ((*at)->type == FFI_TYPE_STRUCT) + result += FFI_ALIGN (sizeof (void*), FFI_SIZEOF_ARG); + else +#endif + result += FFI_ALIGN ((*at)->size, FFI_SIZEOF_ARG); + } + + return result; +} + + +void +ffi_raw_to_ptrarray (ffi_cif *cif, ffi_raw *raw, void **args) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + +#if WORDS_BIGENDIAN + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 1); + break; + + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 2); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + *args = (void*) ((char*)(raw++) + FFI_SIZEOF_ARG - 4); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + *args = (raw++)->ptr; + break; +#endif + + case FFI_TYPE_COMPLEX: + *args = (raw++)->ptr; + break; + + case FFI_TYPE_POINTER: + *args = (void*) &(raw++)->ptr; + break; + + default: + *args = raw; + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } + +#else /* WORDS_BIGENDIAN */ + +#if !PDP + + /* then assume little endian */ + for (i = 0; i < cif->nargs; i++, tp++, args++) + { +#if !FFI_NO_STRUCTS + if ((*tp)->type == FFI_TYPE_STRUCT) + { + *args = (raw++)->ptr; + } + else +#endif + if ((*tp)->type == FFI_TYPE_COMPLEX) + { + *args = (raw++)->ptr; + } + else + { + *args = (void*) raw; + raw += FFI_ALIGN ((*tp)->size, sizeof (void*)) / sizeof (void*); + } + } + +#else +#error "pdp endian not supported" +#endif /* ! 
PDP */ + +#endif /* WORDS_BIGENDIAN */ +} + +void +ffi_ptrarray_to_raw (ffi_cif *cif, void **args, ffi_raw *raw) +{ + unsigned i; + ffi_type **tp = cif->arg_types; + + for (i = 0; i < cif->nargs; i++, tp++, args++) + { + switch ((*tp)->type) + { + case FFI_TYPE_UINT8: + (raw++)->uint = *(UINT8*) (*args); + break; + + case FFI_TYPE_SINT8: + (raw++)->sint = *(SINT8*) (*args); + break; + + case FFI_TYPE_UINT16: + (raw++)->uint = *(UINT16*) (*args); + break; + + case FFI_TYPE_SINT16: + (raw++)->sint = *(SINT16*) (*args); + break; + +#if FFI_SIZEOF_ARG >= 4 + case FFI_TYPE_UINT32: + (raw++)->uint = *(UINT32*) (*args); + break; + + case FFI_TYPE_SINT32: + (raw++)->sint = *(SINT32*) (*args); + break; +#endif + +#if !FFI_NO_STRUCTS + case FFI_TYPE_STRUCT: + (raw++)->ptr = *args; + break; +#endif + + case FFI_TYPE_COMPLEX: + (raw++)->ptr = *args; + break; + + case FFI_TYPE_POINTER: + (raw++)->ptr = **(void***) args; + break; + + default: + memcpy ((void*) raw->data, (void*)*args, (*tp)->size); + raw += FFI_ALIGN ((*tp)->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; + } + } +} + +#if !FFI_NATIVE_RAW_API + + +/* This is a generic definition of ffi_raw_call, to be used if the + * native system does not provide a machine-specific implementation. + * Having this, allows code to be written for the raw API, without + * the need for system-specific code to handle input in that format; + * these following couple of functions will handle the translation forth + * and back automatically. 
*/ + +void ffi_raw_call (ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *raw) +{ + void **avalue = (void**) alloca (cif->nargs * sizeof (void*)); + ffi_raw_to_ptrarray (cif, raw, avalue); + ffi_call (cif, fn, rvalue, avalue); +} + +#if FFI_CLOSURES /* base system provides closures */ + +static void +ffi_translate_args (ffi_cif *cif, void *rvalue, + void **avalue, void *user_data) +{ + ffi_raw *raw = (ffi_raw*)alloca (ffi_raw_size (cif)); + ffi_raw_closure *cl = (ffi_raw_closure*)user_data; + + ffi_ptrarray_to_raw (cif, avalue, raw); + (*cl->fun) (cif, rvalue, raw, cl->user_data); +} + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + ffi_status status; + + status = ffi_prep_closure_loc ((ffi_closure*) cl, + cif, + &ffi_translate_args, + codeloc, + codeloc); + if (status == FFI_OK) + { + cl->fun = fun; + cl->user_data = user_data; + } + + return status; +} + +#endif /* FFI_CLOSURES */ +#endif /* !FFI_NATIVE_RAW_API */ + +#if FFI_CLOSURES + +/* Again, here is the generic version of ffi_prep_raw_closure, which + * will install an intermediate "hub" for translation of arguments from + * the pointer-array format, to the raw format */ + +ffi_status +ffi_prep_raw_closure (ffi_raw_closure* cl, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data) +{ + return ffi_prep_raw_closure_loc (cl, cif, fun, user_data, cl); +} + +#endif /* FFI_CLOSURES */ + +#endif /* !FFI_NO_RAW_API */ diff --git a/module/src/main/cpp/whale/src/libffi/types.c b/module/src/main/cpp/whale/src/libffi/types.c new file mode 100644 index 00000000..9ec27f6c --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/types.c @@ -0,0 +1,108 @@ +/* ----------------------------------------------------------------------- + types.c - Copyright (c) 1996, 1998 Red Hat, Inc. + + Predefined ffi_types needed by libffi. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +/* Hide the basic type definitions from the header file, so that we + can redefine them here as "const". 
*/ +#define LIBFFI_HIDE_BASIC_TYPES + +#include +#include + +/* Type definitions */ + +#define FFI_TYPEDEF(name, type, id, maybe_const)\ +struct struct_align_##name { \ + char c; \ + type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_##name = { \ + sizeof(type), \ + offsetof(struct struct_align_##name, x), \ + id, NULL \ +} + +#define FFI_COMPLEX_TYPEDEF(name, type, maybe_const) \ +static ffi_type *ffi_elements_complex_##name [2] = { \ + (ffi_type *)(&ffi_type_##name), NULL \ +}; \ +struct struct_align_complex_##name { \ + char c; \ + _Complex type x; \ +}; \ +FFI_EXTERN \ +maybe_const ffi_type ffi_type_complex_##name = { \ + sizeof(_Complex type), \ + offsetof(struct struct_align_complex_##name, x), \ + FFI_TYPE_COMPLEX, \ + (ffi_type **)ffi_elements_complex_##name \ +} + +/* Size and alignment are fake here. They must not be 0. */ +FFI_EXTERN const ffi_type ffi_type_void = { + 1, 1, FFI_TYPE_VOID, NULL +}; + +FFI_TYPEDEF(uint8, UINT8, FFI_TYPE_UINT8, const); +FFI_TYPEDEF(sint8, SINT8, FFI_TYPE_SINT8, const); +FFI_TYPEDEF(uint16, UINT16, FFI_TYPE_UINT16, const); +FFI_TYPEDEF(sint16, SINT16, FFI_TYPE_SINT16, const); +FFI_TYPEDEF(uint32, UINT32, FFI_TYPE_UINT32, const); +FFI_TYPEDEF(sint32, SINT32, FFI_TYPE_SINT32, const); +FFI_TYPEDEF(uint64, UINT64, FFI_TYPE_UINT64, const); +FFI_TYPEDEF(sint64, SINT64, FFI_TYPE_SINT64, const); + +FFI_TYPEDEF(pointer, void*, FFI_TYPE_POINTER, const); + +FFI_TYPEDEF(float, float, FFI_TYPE_FLOAT, const); +FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const); + +#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__ +#define FFI_LDBL_CONST const +#else +#define FFI_LDBL_CONST +#endif + +#ifdef __alpha__ +/* Even if we're not configured to default to 128-bit long double, + maintain binary compatibility, as -mlong-double-128 can be used + at any time. */ +/* Validate the hard-coded number below. 
*/ +# if defined(__LONG_DOUBLE_128__) && FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +const ffi_type ffi_type_longdouble = { 16, 16, 4, NULL }; +#elif FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_TYPEDEF(longdouble, long double, FFI_TYPE_LONGDOUBLE, FFI_LDBL_CONST); +#endif + +#ifdef FFI_TARGET_HAS_COMPLEX_TYPE +FFI_COMPLEX_TYPEDEF(float, float, const); +FFI_COMPLEX_TYPEDEF(double, double, const); +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +FFI_COMPLEX_TYPEDEF(longdouble, long double, FFI_LDBL_CONST); +#endif +#endif diff --git a/module/src/main/cpp/whale/src/libffi/x86/asmnames.h b/module/src/main/cpp/whale/src/libffi/x86/asmnames.h new file mode 100644 index 00000000..1b18f69a --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/asmnames.h @@ -0,0 +1,35 @@ +#ifdef __x86_64__ + +#ifndef ASMNAMES_H +#define ASMNAMES_H + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef __APPLE__ +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#if defined(__ELF__) && defined(__PIC__) +# define PLT(X) X@PLT +#else +# define PLT(X) X +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +#endif /* ASMNAMES_H */ + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/x86/ffi64_x86_64.c b/module/src/main/cpp/whale/src/libffi/x86/ffi64_x86_64.c new file mode 100644 index 00000000..ddd97688 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/ffi64_x86_64.c @@ -0,0 +1,889 @@ +#ifdef __x86_64__ + +/* ----------------------------------------------------------------------- + ffi64.c - Copyright (c) 2011, 2018 Anthony Green + Copyright (c) 2013 The Written Word, Inc. + Copyright (c) 2008, 2010 Red Hat, Inc. 
+ Copyright (c) 2002, 2007 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#include +#include + +#include +#include +#include +#include "internal64.h" + +#ifdef __x86_64__ + +#define MAX_GPR_REGS 6 +#define MAX_SSE_REGS 8 + +#if defined(__INTEL_COMPILER) +#include "xmmintrin.h" +#define UINT128 __m128 +#else +#if defined(__SUNPRO_C) +#include +#define UINT128 __m128i +#else +#define UINT128 __int128_t +#endif +#endif + +union big_int_union +{ + UINT32 i32; + UINT64 i64; + UINT128 i128; +}; + +struct register_args +{ + /* Registers for argument passing. 
*/ + UINT64 gpr[MAX_GPR_REGS]; + union big_int_union sse[MAX_SSE_REGS]; + UINT64 rax; /* ssecount */ + UINT64 r10; /* static chain */ +}; + +extern void ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)) FFI_HIDDEN; + +/* All reference to register classes here is identical to the code in + gcc/config/i386/i386.c. Do *not* change one without the other. */ + +/* Register class used for passing given 64bit part of the argument. + These represent classes as documented by the PS ABI, with the + exception of SSESF, SSEDF classes, that are basically SSE class, + just gcc will use SF or DFmode move instead of DImode to avoid + reformatting penalties. + + Similary we play games with INTEGERSI_CLASS to use cheaper SImode moves + whenever possible (upper half does contain padding). */ +enum x86_64_reg_class + { + X86_64_NO_CLASS, + X86_64_INTEGER_CLASS, + X86_64_INTEGERSI_CLASS, + X86_64_SSE_CLASS, + X86_64_SSESF_CLASS, + X86_64_SSEDF_CLASS, + X86_64_SSEUP_CLASS, + X86_64_X87_CLASS, + X86_64_X87UP_CLASS, + X86_64_COMPLEX_X87_CLASS, + X86_64_MEMORY_CLASS + }; + +#define MAX_CLASSES 4 + +#define SSE_CLASS_P(X) ((X) >= X86_64_SSE_CLASS && X <= X86_64_SSEUP_CLASS) + +/* x86-64 register passing implementation. See x86-64 ABI for details. Goal + of this code is to classify each 8bytes of incoming argument by the register + class and assign registers accordingly. */ + +/* Return the union class of CLASS1 and CLASS2. + See the x86-64 PS ABI for details. */ + +static enum x86_64_reg_class +merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) +{ + /* Rule #1: If both classes are equal, this is the resulting class. */ + if (class1 == class2) + return class1; + + /* Rule #2: If one of the classes is NO_CLASS, the resulting class is + the other class. 
*/ + if (class1 == X86_64_NO_CLASS) + return class2; + if (class2 == X86_64_NO_CLASS) + return class1; + + /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */ + if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */ + if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS) + || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS)) + return X86_64_INTEGERSI_CLASS; + if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS + || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS) + return X86_64_INTEGER_CLASS; + + /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class, + MEMORY is used. */ + if (class1 == X86_64_X87_CLASS + || class1 == X86_64_X87UP_CLASS + || class1 == X86_64_COMPLEX_X87_CLASS + || class2 == X86_64_X87_CLASS + || class2 == X86_64_X87UP_CLASS + || class2 == X86_64_COMPLEX_X87_CLASS) + return X86_64_MEMORY_CLASS; + + /* Rule #6: Otherwise class SSE is used. */ + return X86_64_SSE_CLASS; +} + +/* Classify the argument of type TYPE and mode MODE. + CLASSES will be filled by the register class used to pass each word + of the operand. The number of words is returned. In case the parameter + should be passed in memory, 0 is returned. As a special case for zero + sized containers, classes[0] will be NO_CLASS and 1 is returned. + + See the x86-64 PS ABI for details. 
+*/ +static size_t +classify_argument (ffi_type *type, enum x86_64_reg_class classes[], + size_t byte_offset) +{ + switch (type->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + case FFI_TYPE_POINTER: + do_integer: + { + size_t size = byte_offset + type->size; + + if (size <= 4) + { + classes[0] = X86_64_INTEGERSI_CLASS; + return 1; + } + else if (size <= 8) + { + classes[0] = X86_64_INTEGER_CLASS; + return 1; + } + else if (size <= 12) + { + classes[0] = X86_64_INTEGER_CLASS; + classes[1] = X86_64_INTEGERSI_CLASS; + return 2; + } + else if (size <= 16) + { + classes[0] = classes[1] = X86_64_INTEGER_CLASS; + return 2; + } + else + FFI_ASSERT (0); + } + case FFI_TYPE_FLOAT: + if (!(byte_offset % 8)) + classes[0] = X86_64_SSESF_CLASS; + else + classes[0] = X86_64_SSE_CLASS; + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = X86_64_SSEDF_CLASS; + return 1; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_X87_CLASS; + classes[1] = X86_64_X87UP_CLASS; + return 2; +#endif + case FFI_TYPE_STRUCT: + { + const size_t UNITS_PER_WORD = 8; + size_t words = (type->size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; + ffi_type **ptr; + unsigned int i; + enum x86_64_reg_class subclasses[MAX_CLASSES]; + + /* If the struct is larger than 32 bytes, pass it on the stack. */ + if (type->size > 32) + return 0; + + for (i = 0; i < words; i++) + classes[i] = X86_64_NO_CLASS; + + /* Zero sized arrays or structures are NO_CLASS. We return 0 to + signalize memory class, so handle it as special case. */ + if (!words) + { + case FFI_TYPE_VOID: + classes[0] = X86_64_NO_CLASS; + return 1; + } + + /* Merge the fields of structure. 
*/ + for (ptr = type->elements; *ptr != NULL; ptr++) + { + size_t num; + + byte_offset = FFI_ALIGN (byte_offset, (*ptr)->alignment); + + num = classify_argument (*ptr, subclasses, byte_offset % 8); + if (num == 0) + return 0; + for (i = 0; i < num; i++) + { + size_t pos = byte_offset / 8; + classes[i + pos] = + merge_classes (subclasses[i], classes[i + pos]); + } + + byte_offset += (*ptr)->size; + } + + if (words > 2) + { + /* When size > 16 bytes, if the first one isn't + X86_64_SSE_CLASS or any other ones aren't + X86_64_SSEUP_CLASS, everything should be passed in + memory. */ + if (classes[0] != X86_64_SSE_CLASS) + return 0; + + for (i = 1; i < words; i++) + if (classes[i] != X86_64_SSEUP_CLASS) + return 0; + } + + /* Final merger cleanup. */ + for (i = 0; i < words; i++) + { + /* If one class is MEMORY, everything should be passed in + memory. */ + if (classes[i] == X86_64_MEMORY_CLASS) + return 0; + + /* The X86_64_SSEUP_CLASS should be always preceded by + X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */ + if (classes[i] == X86_64_SSEUP_CLASS + && classes[i - 1] != X86_64_SSE_CLASS + && classes[i - 1] != X86_64_SSEUP_CLASS) + { + /* The first one should never be X86_64_SSEUP_CLASS. */ + FFI_ASSERT (i != 0); + classes[i] = X86_64_SSE_CLASS; + } + + /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS, + everything should be passed in memory. */ + if (classes[i] == X86_64_X87UP_CLASS + && (classes[i - 1] != X86_64_X87_CLASS)) + { + /* The first one should never be X86_64_X87UP_CLASS. 
*/ + FFI_ASSERT (i != 0); + return 0; + } + } + return words; + } + case FFI_TYPE_COMPLEX: + { + ffi_type *inner = type->elements[0]; + switch (inner->type) + { + case FFI_TYPE_INT: + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + goto do_integer; + + case FFI_TYPE_FLOAT: + classes[0] = X86_64_SSE_CLASS; + if (byte_offset % 8) + { + classes[1] = X86_64_SSESF_CLASS; + return 2; + } + return 1; + case FFI_TYPE_DOUBLE: + classes[0] = classes[1] = X86_64_SSEDF_CLASS; + return 2; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + classes[0] = X86_64_COMPLEX_X87_CLASS; + return 1; +#endif + } + } + } + abort(); +} + +/* Examine the argument and return set number of register required in each + class. Return zero iff parameter should be passed in memory, otherwise + the number of registers. */ + +static size_t +examine_argument (ffi_type *type, enum x86_64_reg_class classes[MAX_CLASSES], + _Bool in_return, int *pngpr, int *pnsse) +{ + size_t n; + unsigned int i; + int ngpr, nsse; + + n = classify_argument (type, classes, 0); + if (n == 0) + return 0; + + ngpr = nsse = 0; + for (i = 0; i < n; ++i) + switch (classes[i]) + { + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + ngpr++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSESF_CLASS: + case X86_64_SSEDF_CLASS: + nsse++; + break; + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_X87_CLASS: + case X86_64_X87UP_CLASS: + case X86_64_COMPLEX_X87_CLASS: + return in_return != 0; + default: + abort (); + } + + *pngpr = ngpr; + *pnsse = nsse; + + return n; +} + +/* Perform machine dependent cif processing. 
*/ + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_cif_machdep_efi64(ffi_cif *cif); +#endif + +ffi_status +ffi_prep_cif_machdep (ffi_cif *cif) +{ + int gprcount, ssecount, i, avn, ngpr, nsse; + unsigned flags; + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t bytes, n, rtype_size; + ffi_type *rtype; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_cif_machdep_efi64(cif); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + gprcount = ssecount = 0; + + rtype = cif->rtype; + rtype_size = rtype->size; + switch (rtype->type) + { + case FFI_TYPE_VOID: + flags = UNIX64_RET_VOID; + break; + case FFI_TYPE_UINT8: + flags = UNIX64_RET_UINT8; + break; + case FFI_TYPE_SINT8: + flags = UNIX64_RET_SINT8; + break; + case FFI_TYPE_UINT16: + flags = UNIX64_RET_UINT16; + break; + case FFI_TYPE_SINT16: + flags = UNIX64_RET_SINT16; + break; + case FFI_TYPE_UINT32: + flags = UNIX64_RET_UINT32; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + flags = UNIX64_RET_SINT32; + break; + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_INT64; + break; + case FFI_TYPE_POINTER: + flags = (sizeof(void *) == 4 ? UNIX64_RET_UINT32 : UNIX64_RET_INT64); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM32; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_XMM64; + break; + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87; + break; + case FFI_TYPE_STRUCT: + n = examine_argument (cif->rtype, classes, 1, &ngpr, &nsse); + if (n == 0) + { + /* The return value is passed in memory. A pointer to that + memory is the first argument. Allocate a register for it. */ + gprcount++; + /* We don't have to do anything in asm for the return. */ + flags = UNIX64_RET_VOID | UNIX64_FLAG_RET_IN_MEM; + } + else + { + _Bool sse0 = SSE_CLASS_P (classes[0]); + + if (rtype_size == 4 && sse0) + flags = UNIX64_RET_XMM32; + else if (rtype_size == 8) + flags = sse0 ? 
UNIX64_RET_XMM64 : UNIX64_RET_INT64; + else + { + _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]); + if (sse0 && sse1) + flags = UNIX64_RET_ST_XMM0_XMM1; + else if (sse0) + flags = UNIX64_RET_ST_XMM0_RAX; + else if (sse1) + flags = UNIX64_RET_ST_RAX_XMM0; + else + flags = UNIX64_RET_ST_RAX_RDX; + flags |= rtype_size << UNIX64_SIZE_SHIFT; + } + } + break; + case FFI_TYPE_COMPLEX: + switch (rtype->elements[0]->type) + { + case FFI_TYPE_UINT8: + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT16: + case FFI_TYPE_SINT16: + case FFI_TYPE_INT: + case FFI_TYPE_UINT32: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT64: + case FFI_TYPE_SINT64: + flags = UNIX64_RET_ST_RAX_RDX | ((unsigned) rtype_size << UNIX64_SIZE_SHIFT); + break; + case FFI_TYPE_FLOAT: + flags = UNIX64_RET_XMM64; + break; + case FFI_TYPE_DOUBLE: + flags = UNIX64_RET_ST_XMM0_XMM1 | (16 << UNIX64_SIZE_SHIFT); + break; +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE + case FFI_TYPE_LONGDOUBLE: + flags = UNIX64_RET_X87_2; + break; +#endif + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + + /* Go over all arguments and determine the way they should be passed. + If it's in a register and there is space for it, let that be so. If + not, add it's size to the stack byte count. 
*/ + for (bytes = 0, i = 0, avn = cif->nargs; i < avn; i++) + { + if (examine_argument (cif->arg_types[i], classes, 0, &ngpr, &nsse) == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = cif->arg_types[i]->alignment; + + if (align < 8) + align = 8; + + bytes = FFI_ALIGN (bytes, align); + bytes += cif->arg_types[i]->size; + } + else + { + gprcount += ngpr; + ssecount += nsse; + } + } + if (ssecount) + flags |= UNIX64_FLAG_XMM_ARGS; + + cif->flags = flags; + cif->bytes = (unsigned) FFI_ALIGN (bytes, 8); + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + enum x86_64_reg_class classes[MAX_CLASSES]; + char *stack, *argp; + ffi_type **arg_types; + int gprcount, ssecount, ngpr, nsse, i, avn, flags; + struct register_args *reg_args; + + /* Can't call 32-bit mode from 64-bit mode. */ + FFI_ASSERT (cif->abi == FFI_UNIX64); + + /* If the return value is a struct and we don't have a return value + address then we need to make one. Otherwise we can ignore it. */ + flags = cif->flags; + if (rvalue == NULL) + { + if (flags & UNIX64_FLAG_RET_IN_MEM) + rvalue = alloca (cif->rtype->size); + else + flags = UNIX64_RET_VOID; + } + + /* Allocate the space for the arguments, plus 4 words of temp space. */ + stack = alloca (sizeof (struct register_args) + cif->bytes + 4*8); + reg_args = (struct register_args *) stack; + argp = stack + sizeof (struct register_args); + + reg_args->r10 = (uintptr_t) closure; + + gprcount = ssecount = 0; + + /* If the return value is passed in memory, add the pointer as the + first integer argument. 
*/ + if (flags & UNIX64_FLAG_RET_IN_MEM) + reg_args->gpr[gprcount++] = (unsigned long) rvalue; + + avn = cif->nargs; + arg_types = cif->arg_types; + + for (i = 0; i < avn; ++i) + { + size_t n, size = arg_types[i]->size; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + memcpy (argp, avalue[i], size); + argp += size; + } + else + { + /* The argument is passed entirely in registers. */ + char *a = (char *) avalue[i]; + unsigned int j; + + for (j = 0; j < n; j++, a += 8, size -= 8) + { + switch (classes[j]) + { + case X86_64_NO_CLASS: + case X86_64_SSEUP_CLASS: + break; + case X86_64_INTEGER_CLASS: + case X86_64_INTEGERSI_CLASS: + /* Sign-extend integer arguments passed in general + purpose registers, to cope with the fact that + LLVM incorrectly assumes that this will be done + (the x86-64 PS ABI does not specify this). 
*/ + switch (arg_types[i]->type) + { + case FFI_TYPE_SINT8: + reg_args->gpr[gprcount] = (SINT64) *((SINT8 *) a); + break; + case FFI_TYPE_SINT16: + reg_args->gpr[gprcount] = (SINT64) *((SINT16 *) a); + break; + case FFI_TYPE_SINT32: + reg_args->gpr[gprcount] = (SINT64) *((SINT32 *) a); + break; + default: + reg_args->gpr[gprcount] = 0; + memcpy (®_args->gpr[gprcount], a, size); + } + gprcount++; + break; + case X86_64_SSE_CLASS: + case X86_64_SSEDF_CLASS: + memcpy (®_args->sse[ssecount++].i64, a, sizeof(UINT64)); + break; + case X86_64_SSESF_CLASS: + memcpy (®_args->sse[ssecount++].i32, a, sizeof(UINT32)); + break; + default: + abort(); + } + } + } + } + reg_args->rax = ssecount; + + ffi_call_unix64 (stack, cif->bytes + sizeof (struct register_args), + flags, rvalue, fn); +} + +#ifndef __ILP32__ +extern void +ffi_call_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue); +#endif + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_efi64(cif, fn, rvalue, avalue); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +#ifndef __ILP32__ +extern void +ffi_call_go_efi64(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure); +#endif + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + { + ffi_call_go_efi64(cif, fn, rvalue, avalue, closure); + return; + } +#endif + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_unix64(void) FFI_HIDDEN; +extern void ffi_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_closure_loc_efi64(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc); +#endif + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, 
+ ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[16] = { + /* leaq -0x7(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf9, 0xff, 0xff, 0xff, + /* jmpq *0x3(%rip) # 0x10 */ + 0xff, 0x25, 0x03, 0x00, 0x00, 0x00, + /* nopl (%rax) */ + 0x0f, 0x1f, 0x00 + }; + void (*dest)(void); + char *tramp = closure->tramp; + +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_closure_loc_efi64(closure, cif, fun, user_data, codeloc); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + if (cif->flags & UNIX64_FLAG_XMM_ARGS) + dest = ffi_closure_unix64_sse; + else + dest = ffi_closure_unix64; + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + 16) = (uintptr_t)dest; + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +int FFI_HIDDEN +ffi_closure_unix64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *rvalue, + struct register_args *reg_args, + char *argp) +{ + void **avalue; + ffi_type **arg_types; + long i, avn; + int gprcount, ssecount, ngpr, nsse; + int flags; + + avn = cif->nargs; + flags = cif->flags; + avalue = alloca(avn * sizeof(void *)); + gprcount = ssecount = 0; + + if (flags & UNIX64_FLAG_RET_IN_MEM) + { + /* On return, %rax will contain the address that was passed + by the caller in %rdi. */ + void *r = (void *)(uintptr_t)reg_args->gpr[gprcount++]; + *(void **)rvalue = r; + rvalue = r; + flags = (sizeof(void *) == 4 ? 
UNIX64_RET_UINT32 : UNIX64_RET_INT64); + } + + arg_types = cif->arg_types; + for (i = 0; i < avn; ++i) + { + enum x86_64_reg_class classes[MAX_CLASSES]; + size_t n; + + n = examine_argument (arg_types[i], classes, 0, &ngpr, &nsse); + if (n == 0 + || gprcount + ngpr > MAX_GPR_REGS + || ssecount + nsse > MAX_SSE_REGS) + { + long align = arg_types[i]->alignment; + + /* Stack arguments are *always* at least 8 byte aligned. */ + if (align < 8) + align = 8; + + /* Pass this argument in memory. */ + argp = (void *) FFI_ALIGN (argp, align); + avalue[i] = argp; + argp += arg_types[i]->size; + } + /* If the argument is in a single register, or two consecutive + integer registers, then we can use that address directly. */ + else if (n == 1 + || (n == 2 && !(SSE_CLASS_P (classes[0]) + || SSE_CLASS_P (classes[1])))) + { + /* The argument is in a single register. */ + if (SSE_CLASS_P (classes[0])) + { + avalue[i] = ®_args->sse[ssecount]; + ssecount += n; + } + else + { + avalue[i] = ®_args->gpr[gprcount]; + gprcount += n; + } + } + /* Otherwise, allocate space to make them consecutive. */ + else + { + char *a = alloca (16); + unsigned int j; + + avalue[i] = a; + for (j = 0; j < n; j++, a += 8) + { + if (SSE_CLASS_P (classes[j])) + memcpy (a, ®_args->sse[ssecount++], 8); + else + memcpy (a, ®_args->gpr[gprcount++], 8); + } + } + } + + /* Invoke the closure. */ + fun (cif, rvalue, avalue, user_data); + + /* Tell assembly how to perform return type promotions. 
*/ + return flags; +} + +extern void ffi_go_closure_unix64(void) FFI_HIDDEN; +extern void ffi_go_closure_unix64_sse(void) FFI_HIDDEN; + +#ifndef __ILP32__ +extern ffi_status +ffi_prep_go_closure_efi64(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)); +#endif + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ +#ifndef __ILP32__ + if (cif->abi == FFI_EFI64 || cif->abi == FFI_GNUW64) + return ffi_prep_go_closure_efi64(closure, cif, fun); +#endif + if (cif->abi != FFI_UNIX64) + return FFI_BAD_ABI; + + closure->tramp = (cif->flags & UNIX64_FLAG_XMM_ARGS + ? ffi_go_closure_unix64_sse + : ffi_go_closure_unix64); + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +#endif /* __x86_64__ */ + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/x86/ffi_i386.c b/module/src/main/cpp/whale/src/libffi/x86/ffi_i386.c new file mode 100644 index 00000000..89b49b41 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/ffi_i386.c @@ -0,0 +1,759 @@ +#ifdef __i386__ + +/* ----------------------------------------------------------------------- + ffi.c - Copyright (c) 2017 Anthony Green + Copyright (c) 1996, 1998, 1999, 2001, 2007, 2008 Red Hat, Inc. + Copyright (c) 2002 Ranjit Mathew + Copyright (c) 2002 Bo Thorsen + Copyright (c) 2002 Roger Sayle + Copyright (C) 2008, 2010 Free Software Foundation, Inc. 
+ + x86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifndef __x86_64__ +#include +#include +#include +#include +#include "internal.h" + +/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; + all further uses in this file will refer to the 80-bit type. */ +#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE +# if FFI_TYPE_LONGDOUBLE != 4 +# error FFI_TYPE_LONGDOUBLE out of date +# endif +#else +# undef FFI_TYPE_LONGDOUBLE +# define FFI_TYPE_LONGDOUBLE 4 +#endif + +#if defined(__GNUC__) && !defined(__declspec) +# define __declspec(x) __attribute__((x)) +#endif + +/* Perform machine dependent cif processing. 
*/ +ffi_status FFI_HIDDEN +ffi_prep_cif_machdep(ffi_cif *cif) +{ + size_t bytes = 0; + int i, n, flags, cabi = cif->abi; + + switch (cabi) + { + case FFI_SYSV: + case FFI_STDCALL: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + case FFI_PASCAL: + case FFI_REGISTER: + break; + default: + return FFI_BAD_ABI; + } + + switch (cif->rtype->type) + { + case FFI_TYPE_VOID: + flags = X86_RET_VOID; + break; + case FFI_TYPE_FLOAT: + flags = X86_RET_FLOAT; + break; + case FFI_TYPE_DOUBLE: + flags = X86_RET_DOUBLE; + break; + case FFI_TYPE_LONGDOUBLE: + flags = X86_RET_LDOUBLE; + break; + case FFI_TYPE_UINT8: + flags = X86_RET_UINT8; + break; + case FFI_TYPE_UINT16: + flags = X86_RET_UINT16; + break; + case FFI_TYPE_SINT8: + flags = X86_RET_SINT8; + break; + case FFI_TYPE_SINT16: + flags = X86_RET_SINT16; + break; + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + flags = X86_RET_INT64; + break; + case FFI_TYPE_STRUCT: +#ifndef X86 + /* ??? This should be a different ABI rather than an ifdef. */ + if (cif->rtype->size == 1) + flags = X86_RET_STRUCT_1B; + else if (cif->rtype->size == 2) + flags = X86_RET_STRUCT_2B; + else if (cif->rtype->size == 4) + flags = X86_RET_INT32; + else if (cif->rtype->size == 8) + flags = X86_RET_INT64; + else +#endif + { + do_struct: + switch (cabi) + { + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_STDCALL: + case FFI_MS_CDECL: + flags = X86_RET_STRUCTARG; + break; + default: + flags = X86_RET_STRUCTPOP; + break; + } + /* Allocate space for return value pointer. 
*/ + bytes += FFI_ALIGN (sizeof(void*), FFI_SIZEOF_ARG); + } + break; + case FFI_TYPE_COMPLEX: + switch (cif->rtype->elements[0]->type) + { + case FFI_TYPE_DOUBLE: + case FFI_TYPE_LONGDOUBLE: + case FFI_TYPE_SINT64: + case FFI_TYPE_UINT64: + goto do_struct; + case FFI_TYPE_FLOAT: + case FFI_TYPE_INT: + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + flags = X86_RET_INT64; + break; + case FFI_TYPE_SINT16: + case FFI_TYPE_UINT16: + flags = X86_RET_INT32; + break; + case FFI_TYPE_SINT8: + case FFI_TYPE_UINT8: + flags = X86_RET_STRUCT_2B; + break; + default: + return FFI_BAD_TYPEDEF; + } + break; + default: + return FFI_BAD_TYPEDEF; + } + cif->flags = flags; + + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *t = cif->arg_types[i]; + + bytes = FFI_ALIGN (bytes, t->alignment); + bytes += FFI_ALIGN (t->size, FFI_SIZEOF_ARG); + } + cif->bytes = FFI_ALIGN (bytes, 16); + + return FFI_OK; +} + +static ffi_arg +extend_basic_type(void *arg, int type) +{ + switch (type) + { + case FFI_TYPE_SINT8: + return *(SINT8 *)arg; + case FFI_TYPE_UINT8: + return *(UINT8 *)arg; + case FFI_TYPE_SINT16: + return *(SINT16 *)arg; + case FFI_TYPE_UINT16: + return *(UINT16 *)arg; + + case FFI_TYPE_SINT32: + case FFI_TYPE_UINT32: + case FFI_TYPE_POINTER: + case FFI_TYPE_FLOAT: + return *(UINT32 *)arg; + + default: + abort(); + } +} + +struct call_frame +{ + void *ebp; /* 0 */ + void *retaddr; /* 4 */ + void (*fn)(void); /* 8 */ + int flags; /* 12 */ + void *rvalue; /* 16 */ + unsigned regs[3]; /* 20-28 */ +}; + +struct abi_params +{ + int dir; /* parameter growth direction */ + int static_chain; /* the static chain register used by gcc */ + int nregs; /* number of register parameters */ + int regs[3]; +}; + +static const struct abi_params abi_params[FFI_LAST_ABI] = { + [FFI_SYSV] = { 1, R_ECX, 0 }, + [FFI_THISCALL] = { 1, R_EAX, 1, { R_ECX } }, + [FFI_FASTCALL] = { 1, R_EAX, 2, { R_ECX, R_EDX } }, + [FFI_STDCALL] = { 1, R_ECX, 0 }, + [FFI_PASCAL] = { -1, R_ECX, 0 }, + /* ??? 
No defined static chain; gcc does not support REGISTER. */ + [FFI_REGISTER] = { -1, R_ECX, 3, { R_EAX, R_EDX, R_ECX } }, + [FFI_MS_CDECL] = { 1, R_ECX, 0 } +}; + +#ifdef HAVE_FASTCALL + #ifdef _MSC_VER + #define FFI_DECLARE_FASTCALL __fastcall + #else + #define FFI_DECLARE_FASTCALL __declspec(fastcall) + #endif +#else + #define FFI_DECLARE_FASTCALL +#endif + +extern void FFI_DECLARE_FASTCALL ffi_call_i386(struct call_frame *, char *) FFI_HIDDEN; + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, dir, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + dir = pabi->dir; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. */ + flags = X86_RET_VOID; + break; + } + } + + bytes = cif->bytes; + stack = alloca(bytes + sizeof(*frame) + rsize); + argp = (dir < 0 ? stack + bytes : stack); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + frame->regs[pabi->static_chain] = (unsigned)closure; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. 
*/ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; i < n; i++) + { + ffi_type *ty = arg_types[i]; + void *valp = avalue[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + ffi_arg val = extend_basic_type (valp, t); + + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + frame->regs[pabi->regs[narg_reg++]] = val; + else if (dir < 0) + { + argp -= 4; + *(ffi_arg *)argp = val; + } + else + { + *(ffi_arg *)argp = val; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* Issue 434: For thiscall and fastcall, if the parameter passed + as 64-bit integer or struct, all following integer parameters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + /* Alignment rules for arguments are quite complex. Vectors and + structures with 16 byte alignment get it. Note that long double + on Darwin does have 16 byte alignment, and does not get this + alignment if passed directly; a structure with a long double + inside, however, would get 16 byte alignment. Since libffi does + not support vectors, we need not concern ourselves with other + cases. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. 
*/ + argp -= za; + memcpy (argp, valp, z); + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + memcpy (argp, valp, z); + argp += za; + } + } + } + FFI_ASSERT (dir > 0 || argp == stack); + + ffi_call_i386 (frame, stack); +} + +void +ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +ffi_call_go (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + +/** private members **/ + +void FFI_HIDDEN ffi_closure_i386(void); +void FFI_HIDDEN ffi_closure_STDCALL(void); +void FFI_HIDDEN ffi_closure_REGISTER(void); + +struct closure_frame +{ + unsigned rettemp[4]; /* 0 */ + unsigned regs[3]; /* 16-24 */ + ffi_cif *cif; /* 28 */ + void (*fun)(ffi_cif*,void*,void**,void*); /* 32 */ + void *user_data; /* 36 */ +}; + +int FFI_HIDDEN FFI_DECLARE_FASTCALL +ffi_closure_inner (struct closure_frame *frame, char *stack) +{ + ffi_cif *cif = frame->cif; + int cabi, i, n, flags, dir, narg_reg; + const struct abi_params *pabi; + ffi_type **arg_types; + char *argp; + void *rvalue; + void **avalue; + + cabi = cif->abi; + flags = cif->flags; + narg_reg = 0; + rvalue = frame->rettemp; + pabi = &abi_params[cabi]; + dir = pabi->dir; + argp = (dir < 0 ? 
stack + cif->bytes : stack); + + switch (flags) + { + case X86_RET_STRUCTARG: + if (pabi->nregs > 0) + { + rvalue = (void *)frame->regs[pabi->regs[0]]; + narg_reg = 1; + frame->rettemp[0] = (unsigned)rvalue; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + rvalue = *(void **)argp; + argp += sizeof(void *); + frame->rettemp[0] = (unsigned)rvalue; + break; + } + + n = cif->nargs; + avalue = alloca(sizeof(void *) * n); + + arg_types = cif->arg_types; + for (i = 0; i < n; ++i) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + void *valp; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT) + { + if (t != FFI_TYPE_FLOAT && narg_reg < pabi->nregs) + valp = &frame->regs[pabi->regs[narg_reg++]]; + else if (dir < 0) + { + argp -= 4; + valp = argp; + } + else + { + valp = argp; + argp += 4; + } + } + else + { + size_t za = FFI_ALIGN (z, FFI_SIZEOF_ARG); + size_t align = FFI_SIZEOF_ARG; + + /* See the comment in ffi_call_int. */ + if (t == FFI_TYPE_STRUCT && ty->alignment >= 16) + align = 16; + + /* Issue 434: For thiscall and fastcall, if the parameter passed + as 64-bit integer or struct, all following integer parameters + will be passed on stack. */ + if ((cabi == FFI_THISCALL || cabi == FFI_FASTCALL) + && (t == FFI_TYPE_SINT64 + || t == FFI_TYPE_UINT64 + || t == FFI_TYPE_STRUCT)) + narg_reg = 2; + + if (dir < 0) + { + /* ??? These reverse argument ABIs are probably too old + to have cared about alignment. Someone should check. 
*/ + argp -= za; + valp = argp; + } + else + { + argp = (char *)FFI_ALIGN (argp, align); + valp = argp; + argp += za; + } + } + + avalue[i] = valp; + } + + frame->fun (cif, rvalue, avalue, frame->user_data); + + if (cabi == FFI_STDCALL) + return flags + (cif->bytes << X86_RET_POP_SHIFT); + else + return flags; +} + +ffi_status +ffi_prep_closure_loc (ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int op = 0xb8; /* movl imm, %eax */ + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_THISCALL: + case FFI_FASTCALL: + case FFI_MS_CDECL: + dest = ffi_closure_i386; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_closure_STDCALL; + break; + case FFI_REGISTER: + dest = ffi_closure_REGISTER; + op = 0x68; /* pushl imm */ + break; + default: + return FFI_BAD_ABI; + } + + /* movl or pushl immediate. */ + tramp[0] = op; + *(void **)(tramp + 1) = codeloc; + + /* jmp dest */ + tramp[5] = 0xe9; + *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void FFI_HIDDEN ffi_go_closure_EAX(void); +void FFI_HIDDEN ffi_go_closure_ECX(void); +void FFI_HIDDEN ffi_go_closure_STDCALL(void); + +ffi_status +ffi_prep_go_closure (ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*)) +{ + void (*dest)(void); + + switch (cif->abi) + { + case FFI_SYSV: + case FFI_MS_CDECL: + dest = ffi_go_closure_ECX; + break; + case FFI_THISCALL: + case FFI_FASTCALL: + dest = ffi_go_closure_EAX; + break; + case FFI_STDCALL: + case FFI_PASCAL: + dest = ffi_go_closure_STDCALL; + break; + case FFI_REGISTER: + default: + return FFI_BAD_ABI; + } + + closure->tramp = dest; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +/* ------- Native raw API support -------------------------------- */ + +#if !FFI_NO_RAW_API 
+ +void FFI_HIDDEN ffi_closure_raw_SYSV(void); +void FFI_HIDDEN ffi_closure_raw_THISCALL(void); + +ffi_status +ffi_prep_raw_closure_loc (ffi_raw_closure *closure, + ffi_cif *cif, + void (*fun)(ffi_cif*,void*,ffi_raw*,void*), + void *user_data, + void *codeloc) +{ + char *tramp = closure->tramp; + void (*dest)(void); + int i; + + /* We currently don't support certain kinds of arguments for raw + closures. This should be implemented by a separate assembly + language routine, since it would require argument processing, + something we don't do now for performance. */ + for (i = cif->nargs-1; i >= 0; i--) + switch (cif->arg_types[i]->type) + { + case FFI_TYPE_STRUCT: + case FFI_TYPE_LONGDOUBLE: + return FFI_BAD_TYPEDEF; + } + + switch (cif->abi) + { + case FFI_THISCALL: + dest = ffi_closure_raw_THISCALL; + break; + case FFI_SYSV: + dest = ffi_closure_raw_SYSV; + break; + default: + return FFI_BAD_ABI; + } + + /* movl imm, %eax. */ + tramp[0] = 0xb8; + *(void **)(tramp + 1) = codeloc; + + /* jmp dest */ + tramp[5] = 0xe9; + *(unsigned *)(tramp + 6) = (unsigned)dest - ((unsigned)codeloc + 10); + + closure->cif = cif; + closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +void +ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *avalue) +{ + size_t rsize, bytes; + struct call_frame *frame; + char *stack, *argp; + ffi_type **arg_types; + int flags, cabi, i, n, narg_reg; + const struct abi_params *pabi; + + flags = cif->flags; + cabi = cif->abi; + pabi = &abi_params[cabi]; + + rsize = 0; + if (rvalue == NULL) + { + switch (flags) + { + case X86_RET_FLOAT: + case X86_RET_DOUBLE: + case X86_RET_LDOUBLE: + case X86_RET_STRUCTPOP: + case X86_RET_STRUCTARG: + /* The float cases need to pop the 387 stack. + The struct cases need to pass a valid pointer to the callee. */ + rsize = cif->rtype->size; + break; + default: + /* We can pretend that the callee returns nothing. 
*/ + flags = X86_RET_VOID; + break; + } + } + + bytes = cif->bytes; + argp = stack = + (void *)((uintptr_t)alloca(bytes + sizeof(*frame) + rsize + 15) & ~16); + frame = (struct call_frame *)(stack + bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = fn; + frame->flags = flags; + frame->rvalue = rvalue; + + narg_reg = 0; + switch (flags) + { + case X86_RET_STRUCTARG: + /* The pointer is passed as the first argument. */ + if (pabi->nregs > 0) + { + frame->regs[pabi->regs[0]] = (unsigned)rvalue; + narg_reg = 1; + break; + } + /* fallthru */ + case X86_RET_STRUCTPOP: + *(void **)argp = rvalue; + argp += sizeof(void *); + bytes -= sizeof(void *); + break; + } + + arg_types = cif->arg_types; + for (i = 0, n = cif->nargs; narg_reg < pabi->nregs && i < n; i++) + { + ffi_type *ty = arg_types[i]; + size_t z = ty->size; + int t = ty->type; + + if (z <= FFI_SIZEOF_ARG && t != FFI_TYPE_STRUCT && t != FFI_TYPE_FLOAT) + { + ffi_arg val = extend_basic_type (avalue, t); + frame->regs[pabi->regs[narg_reg++]] = val; + z = FFI_SIZEOF_ARG; + } + else + { + memcpy (argp, avalue, z); + z = FFI_ALIGN (z, FFI_SIZEOF_ARG); + argp += z; + } + avalue += z; + bytes -= z; + } + if (i < n) + memcpy (argp, avalue, bytes); + + ffi_call_i386 (frame, stack); +} +#endif /* !FFI_NO_RAW_API */ +#endif /* !__x86_64__ */ + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/x86/ffiw64_x86_64.c b/module/src/main/cpp/whale/src/libffi/x86/ffiw64_x86_64.c new file mode 100644 index 00000000..90362be9 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/ffiw64_x86_64.c @@ -0,0 +1,313 @@ +#ifdef __x86_64__ + +/* ----------------------------------------------------------------------- + ffiw64.c - Copyright (c) 2018 Anthony Green + Copyright (c) 2014 Red Hat, Inc. 
+ + x86 win64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#include +#include +#include +#include + +#ifdef X86_WIN64 +#define EFI64(name) name +#else +#define EFI64(name) name##_efi64 +#endif + +struct win64_call_frame +{ + UINT64 rbp; /* 0 */ + UINT64 retaddr; /* 8 */ + UINT64 fn; /* 16 */ + UINT64 flags; /* 24 */ + UINT64 rvalue; /* 32 */ +}; + +extern void ffi_call_win64 (void *stack, struct win64_call_frame *, + void *closure) FFI_HIDDEN; + +ffi_status +EFI64(ffi_prep_cif_machdep)(ffi_cif *cif) +{ + int flags, n; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + flags = cif->rtype->type; + switch (flags) + { + default: + break; + case FFI_TYPE_LONGDOUBLE: + /* GCC returns long double values by reference, like a struct */ + if (cif->abi == FFI_GNUW64) + flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_COMPLEX: + flags = FFI_TYPE_STRUCT; + /* FALLTHRU */ + case FFI_TYPE_STRUCT: + switch (cif->rtype->size) + { + case 8: + flags = FFI_TYPE_UINT64; + break; + case 4: + flags = FFI_TYPE_SMALL_STRUCT_4B; + break; + case 2: + flags = FFI_TYPE_SMALL_STRUCT_2B; + break; + case 1: + flags = FFI_TYPE_SMALL_STRUCT_1B; + break; + } + break; + } + cif->flags = flags; + + /* Each argument either fits in a register, an 8 byte slot, or is + passed by reference with the pointer in the 8 byte slot. */ + n = cif->nargs; + n += (flags == FFI_TYPE_STRUCT); + if (n < 4) + n = 4; + cif->bytes = n * 8; + + return FFI_OK; +} + +static void +ffi_call_int (ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + int i, j, n, flags; + UINT64 *stack; + size_t rsize; + struct win64_call_frame *frame; + + FFI_ASSERT(cif->abi == FFI_GNUW64 || cif->abi == FFI_WIN64); + + flags = cif->flags; + rsize = 0; + + /* If we have no return value for a structure, we need to create one. + Otherwise we can ignore the return type entirely. 
*/ + if (rvalue == NULL) + { + if (flags == FFI_TYPE_STRUCT) + rsize = cif->rtype->size; + else + flags = FFI_TYPE_VOID; + } + + stack = alloca(cif->bytes + sizeof(struct win64_call_frame) + rsize); + frame = (struct win64_call_frame *)((char *)stack + cif->bytes); + if (rsize) + rvalue = frame + 1; + + frame->fn = (uintptr_t)fn; + frame->flags = flags; + frame->rvalue = (uintptr_t)rvalue; + + j = 0; + if (flags == FFI_TYPE_STRUCT) + { + stack[0] = (uintptr_t)rvalue; + j = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++j) + { + switch (cif->arg_types[i]->size) + { + case 8: + stack[j] = *(UINT64 *)avalue[i]; + break; + case 4: + stack[j] = *(UINT32 *)avalue[i]; + break; + case 2: + stack[j] = *(UINT16 *)avalue[i]; + break; + case 1: + stack[j] = *(UINT8 *)avalue[i]; + break; + default: + stack[j] = (uintptr_t)avalue[i]; + break; + } + } + + ffi_call_win64 (stack, frame, closure); +} + +void +EFI64(ffi_call)(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue) +{ + ffi_call_int (cif, fn, rvalue, avalue, NULL); +} + +void +EFI64(ffi_call_go)(ffi_cif *cif, void (*fn)(void), void *rvalue, + void **avalue, void *closure) +{ + ffi_call_int (cif, fn, rvalue, avalue, closure); +} + + +extern void ffi_closure_win64(void) FFI_HIDDEN; +extern void ffi_go_closure_win64(void) FFI_HIDDEN; + +ffi_status +EFI64(ffi_prep_closure_loc)(ffi_closure* closure, + ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + void *codeloc) +{ + static const unsigned char trampoline[16] = { + /* leaq -0x7(%rip),%r10 # 0x0 */ + 0x4c, 0x8d, 0x15, 0xf9, 0xff, 0xff, 0xff, + /* jmpq *0x3(%rip) # 0x10 */ + 0xff, 0x25, 0x03, 0x00, 0x00, 0x00, + /* nopl (%rax) */ + 0x0f, 0x1f, 0x00 + }; + char *tramp = closure->tramp; + + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + memcpy (tramp, trampoline, sizeof(trampoline)); + *(UINT64 *)(tramp + 16) = (uintptr_t)ffi_closure_win64; + + closure->cif = cif; + 
closure->fun = fun; + closure->user_data = user_data; + + return FFI_OK; +} + +ffi_status +EFI64(ffi_prep_go_closure)(ffi_go_closure* closure, ffi_cif* cif, + void (*fun)(ffi_cif*, void*, void**, void*)) +{ + switch (cif->abi) + { + case FFI_WIN64: + case FFI_GNUW64: + break; + default: + return FFI_BAD_ABI; + } + + closure->tramp = ffi_go_closure_win64; + closure->cif = cif; + closure->fun = fun; + + return FFI_OK; +} + +struct win64_closure_frame +{ + UINT64 rvalue[2]; + UINT64 fargs[4]; + UINT64 retaddr; + UINT64 args[]; +}; + +/* Force the inner function to use the MS ABI. When compiling on win64 + this is a nop. When compiling on unix, this simplifies the assembly, + and places the burden of saving the extra call-saved registers on + the compiler. */ +int FFI_HIDDEN __attribute__((ms_abi)) +ffi_closure_win64_inner(ffi_cif *cif, + void (*fun)(ffi_cif*, void*, void**, void*), + void *user_data, + struct win64_closure_frame *frame) +{ + void **avalue; + void *rvalue; + int i, n, nreg, flags; + + avalue = alloca(cif->nargs * sizeof(void *)); + rvalue = frame->rvalue; + nreg = 0; + + /* When returning a structure, the address is in the first argument. + We must also be prepared to return the same address in eax, so + install that address in the frame and pretend we return a pointer. */ + flags = cif->flags; + if (flags == FFI_TYPE_STRUCT) + { + rvalue = (void *)(uintptr_t)frame->args[0]; + frame->rvalue[0] = frame->args[0]; + nreg = 1; + } + + for (i = 0, n = cif->nargs; i < n; ++i, ++nreg) + { + size_t size = cif->arg_types[i]->size; + size_t type = cif->arg_types[i]->type; + void *a; + + if (type == FFI_TYPE_DOUBLE || type == FFI_TYPE_FLOAT) + { + if (nreg < 4) + a = &frame->fargs[nreg]; + else + a = &frame->args[nreg]; + } + else if (size == 1 || size == 2 || size == 4 || size == 8) + a = &frame->args[nreg]; + else + a = (void *)(uintptr_t)frame->args[nreg]; + + avalue[i] = a; + } + + /* Invoke the closure. 
*/ + fun (cif, rvalue, avalue, user_data); + return flags; +} + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/x86/internal.h b/module/src/main/cpp/whale/src/libffi/x86/internal.h new file mode 100644 index 00000000..5cd20405 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/internal.h @@ -0,0 +1,34 @@ +#ifdef __i386__ + +#define X86_RET_FLOAT 0 +#define X86_RET_DOUBLE 1 +#define X86_RET_LDOUBLE 2 +#define X86_RET_SINT8 3 +#define X86_RET_SINT16 4 +#define X86_RET_UINT8 5 +#define X86_RET_UINT16 6 +#define X86_RET_INT64 7 +#define X86_RET_INT32 8 +#define X86_RET_VOID 9 +#define X86_RET_STRUCTPOP 10 +#define X86_RET_STRUCTARG 11 +#define X86_RET_STRUCT_1B 12 +#define X86_RET_STRUCT_2B 13 +#define X86_RET_UNUSED14 14 +#define X86_RET_UNUSED15 15 + +#define X86_RET_TYPE_MASK 15 +#define X86_RET_POP_SHIFT 4 + +#define R_EAX 0 +#define R_EDX 1 +#define R_ECX 2 + +#ifdef __PCC__ +# define HAVE_FASTCALL 0 +#else +# define HAVE_FASTCALL 1 +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/libffi/x86/internal64.h b/module/src/main/cpp/whale/src/libffi/x86/internal64.h new file mode 100644 index 00000000..c9535d13 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/internal64.h @@ -0,0 +1,27 @@ +#ifdef __x86_64__ + +#define UNIX64_RET_VOID 0 +#define UNIX64_RET_UINT8 1 +#define UNIX64_RET_UINT16 2 +#define UNIX64_RET_UINT32 3 +#define UNIX64_RET_SINT8 4 +#define UNIX64_RET_SINT16 5 +#define UNIX64_RET_SINT32 6 +#define UNIX64_RET_INT64 7 +#define UNIX64_RET_XMM32 8 +#define UNIX64_RET_XMM64 9 +#define UNIX64_RET_X87 10 +#define UNIX64_RET_X87_2 11 +#define UNIX64_RET_ST_XMM0_RAX 12 +#define UNIX64_RET_ST_RAX_XMM0 13 +#define UNIX64_RET_ST_XMM0_XMM1 14 +#define UNIX64_RET_ST_RAX_RDX 15 + +#define UNIX64_RET_LAST 15 + +#define UNIX64_FLAG_RET_IN_MEM (1 << 10) +#define UNIX64_FLAG_XMM_ARGS (1 << 11) +#define UNIX64_SIZE_SHIFT 12 + + +#endif \ No newline at end of file diff 
--git a/module/src/main/cpp/whale/src/libffi/x86/sysv_i386.S b/module/src/main/cpp/whale/src/libffi/x86/sysv_i386.S new file mode 100644 index 00000000..41a49609 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/sysv_i386.S @@ -0,0 +1,1134 @@ +#ifdef __i386__ + +/* ----------------------------------------------------------------------- + sysv.S - Copyright (c) 2017 Anthony Green + - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc. + + X86 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ ----------------------------------------------------------------------- */ + +#ifndef __x86_64__ +#ifndef _MSC_VER + +#define LIBFFI_ASM +#include +#include +#include "internal.h" + +#define C2(X, Y) X ## Y +#define C1(X, Y) C2(X, Y) +#ifdef __USER_LABEL_PREFIX__ +# define C(X) C1(__USER_LABEL_PREFIX__, X) +#else +# define C(X) X +#endif + +#ifdef X86_DARWIN +# define L(X) C1(L, X) +#else +# define L(X) C1(.L, X) +#endif + +#ifdef __ELF__ +# define ENDF(X) .type X,@function; .size X, . - X +#else +# define ENDF(X) +#endif + +/* Handle win32 fastcall name mangling. */ +#ifdef X86_WIN32 +# define ffi_call_i386 @ffi_call_i386@8 +# define ffi_closure_inner @ffi_closure_inner@8 +#else +# define ffi_call_i386 C(ffi_call_i386) +# define ffi_closure_inner C(ffi_closure_inner) +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + .balign 16 + .globl ffi_call_i386 + FFI_HIDDEN(ffi_call_i386) + +/* This is declared as + + void ffi_call_i386(struct call_frame *frame, char *argp) + __attribute__((fastcall)); + + Thus the arguments are present in + + ecx: frame + edx: argp +*/ + +ffi_call_i386: +L(UW0): + # cfi_startproc +#if !HAVE_FASTCALL + movl 4(%esp), %ecx + movl 8(%esp), %edx +#endif + movl (%esp), %eax /* move the return address */ + movl %ebp, (%ecx) /* store %ebp into local frame */ + movl %eax, 4(%ecx) /* store retaddr into local frame */ + + /* New stack frame based off ebp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. 
Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-4, so from the + perspective of the unwind info, it hasn't moved. */ + movl %ecx, %ebp +L(UW1): + # cfi_def_cfa(%ebp, 8) + # cfi_rel_offset(%ebp, 0) + + movl %edx, %esp /* set outgoing argument stack */ + movl 20+R_EAX*4(%ebp), %eax /* set register arguments */ + movl 20+R_EDX*4(%ebp), %edx + movl 20+R_ECX*4(%ebp), %ecx + + call *8(%ebp) + + movl 12(%ebp), %ecx /* load return type code */ + movl %ebx, 8(%ebp) /* preserve %ebx */ +L(UW2): + # cfi_rel_offset(%ebx, 8) + + andl $X86_RET_TYPE_MASK, %ecx +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc1): + leal L(store_table)-L(pc1)(%ebx, %ecx, 8), %ebx +#else + leal L(store_table)(,%ecx, 8), %ebx +#endif + movl 16(%ebp), %ecx /* load result address */ + jmp *%ebx + + .balign 8 +L(store_table): +E(L(store_table), X86_RET_FLOAT) + fstps (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_DOUBLE) + fstpl (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_LDOUBLE) + fstpt (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT8) + movsbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_SINT16) + movswl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT8) + movzbl %al, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_UINT16) + movzwl %ax, %eax + mov %eax, (%ecx) + jmp L(e1) +E(L(store_table), X86_RET_INT64) + movl %edx, 4(%ecx) + /* fallthru */ +E(L(store_table), X86_RET_INT32) + movl %eax, (%ecx) + /* fallthru */ +E(L(store_table), X86_RET_VOID) +L(e1): + movl 8(%ebp), %ebx + movl %ebp, %esp + popl %ebp +L(UW3): + # cfi_remember_state + # cfi_def_cfa(%esp, 4) + # cfi_restore(%ebx) + # cfi_restore(%ebp) + ret +L(UW4): + # cfi_restore_state + +E(L(store_table), X86_RET_STRUCTPOP) + jmp L(e1) +E(L(store_table), X86_RET_STRUCTARG) + jmp L(e1) +E(L(store_table), X86_RET_STRUCT_1B) + movb %al, (%ecx) + 
jmp L(e1) +E(L(store_table), X86_RET_STRUCT_2B) + movw %ax, (%ecx) + jmp L(e1) + + /* Fill out the table so that bad values are predictable. */ +E(L(store_table), X86_RET_UNUSED14) + ud2 +E(L(store_table), X86_RET_UNUSED15) + ud2 + +L(UW5): + # cfi_endproc +ENDF(ffi_call_i386) + +/* The inner helper is declared as + + void ffi_closure_inner(struct closure_frame *frame, char *argp) + __attribute_((fastcall)) + + Thus the arguments are placed in + + ecx: frame + edx: argp +*/ + +/* Macros to help setting up the closure_data structure. */ + +#if HAVE_FASTCALL +# define closure_FS (40 + 4) +# define closure_CF 0 +#else +# define closure_FS (8 + 40 + 12) +# define closure_CF 8 +#endif + +#define FFI_CLOSURE_SAVE_REGS \ + movl %eax, closure_CF+16+R_EAX*4(%esp); \ + movl %edx, closure_CF+16+R_EDX*4(%esp); \ + movl %ecx, closure_CF+16+R_ECX*4(%esp) + +#define FFI_CLOSURE_COPY_TRAMP_DATA \ + movl FFI_TRAMPOLINE_SIZE(%eax), %edx; /* copy cif */ \ + movl FFI_TRAMPOLINE_SIZE+4(%eax), %ecx; /* copy fun */ \ + movl FFI_TRAMPOLINE_SIZE+8(%eax), %eax; /* copy user_data */ \ + movl %edx, closure_CF+28(%esp); \ + movl %ecx, closure_CF+32(%esp); \ + movl %eax, closure_CF+36(%esp) + +#if HAVE_FASTCALL +# define FFI_CLOSURE_PREP_CALL \ + movl %esp, %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ +#else +# define FFI_CLOSURE_PREP_CALL \ + leal closure_CF(%esp), %ecx; /* load closure_data */ \ + leal closure_FS+4(%esp), %edx; /* load incoming stack */ \ + movl %ecx, (%esp); \ + movl %edx, 4(%esp) +#endif + +#define FFI_CLOSURE_CALL_INNER(UWN) \ + call ffi_closure_inner + +#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))(, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + jmp *%edx + +#ifdef __PIC__ +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \ + andl 
$X86_RET_TYPE_MASK, %eax; \ + call C(__x86.get_pc_thunk.dx); \ +L(C1(pc,N)): \ + leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %edx; \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + jmp *%edx +# else +# define FFI_CLOSURE_CALL_INNER_SAVE_EBX +# undef FFI_CLOSURE_CALL_INNER +# define FFI_CLOSURE_CALL_INNER(UWN) \ + movl %ebx, 40(%esp); /* save ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_rel_offset(%ebx, 40); */ \ + call C(__x86.get_pc_thunk.bx); /* load got register */ \ + addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \ + call ffi_closure_inner@PLT +# undef FFI_CLOSURE_MASK_AND_JUMP +# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \ + andl $X86_RET_TYPE_MASK, %eax; \ + leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %edx; \ + movl 40(%esp), %ebx; /* restore ebx */ \ +L(C1(UW,UWN)): \ + /* cfi_restore(%ebx); */ \ + movl closure_CF(%esp), %eax; /* optimiztic load */ \ + jmp *%edx +# endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + + .balign 16 + .globl C(ffi_go_closure_EAX) + FFI_HIDDEN(C(ffi_go_closure_EAX)) +C(ffi_go_closure_EAX): +L(UW6): + # cfi_startproc + subl $closure_FS, %esp +L(UW7): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%eax), %edx /* copy cif */ + movl 8(%eax), %ecx /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %ecx, closure_CF+32(%esp) + movl %eax, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW8): + # cfi_endproc +ENDF(C(ffi_go_closure_EAX)) + + .balign 16 + .globl C(ffi_go_closure_ECX) + FFI_HIDDEN(C(ffi_go_closure_ECX)) +C(ffi_go_closure_ECX): +L(UW9): + # cfi_startproc + subl $closure_FS, %esp +L(UW10): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_i386) +L(UW11): + # cfi_endproc +ENDF(C(ffi_go_closure_ECX)) + +/* The closure entry points are reached 
from the ffi_closure trampoline. + On entry, %eax contains the address of the ffi_closure. */ + + .balign 16 + .globl C(ffi_closure_i386) + FFI_HIDDEN(C(ffi_closure_i386)) + +C(ffi_closure_i386): +L(UW12): + # cfi_startproc + subl $closure_FS, %esp +L(UW13): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closures. */ +L(do_closure_i386): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(14) + FFI_CLOSURE_MASK_AND_JUMP(2, 15) + + .balign 8 +L(load_table2): +E(L(load_table2), X86_RET_FLOAT) + flds closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_DOUBLE) + fldl closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + jmp L(e2) +E(L(load_table2), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e2) +E(L(load_table2), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + jmp L(e2) +E(L(load_table2), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table2), X86_RET_VOID) +L(e2): + addl $closure_FS, %esp +L(UW16): + # cfi_adjust_cfa_offset(-closure_FS) + ret +L(UW17): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTPOP) + addl $closure_FS, %esp +L(UW18): + # cfi_adjust_cfa_offset(-closure_FS) + ret $4 +L(UW19): + # cfi_adjust_cfa_offset(closure_FS) +E(L(load_table2), X86_RET_STRUCTARG) + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e2) +E(L(load_table2), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e2) + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table2), X86_RET_UNUSED14) + ud2 +E(L(load_table2), X86_RET_UNUSED15) + ud2 + +L(UW20): + # cfi_endproc +ENDF(C(ffi_closure_i386)) + + .balign 16 + .globl C(ffi_go_closure_STDCALL) + FFI_HIDDEN(C(ffi_go_closure_STDCALL)) +C(ffi_go_closure_STDCALL): +L(UW21): + # cfi_startproc + subl $closure_FS, %esp +L(UW22): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl 4(%ecx), %edx /* copy cif */ + movl 8(%ecx), %eax /* copy fun */ + movl %edx, closure_CF+28(%esp) + movl %eax, closure_CF+32(%esp) + movl %ecx, closure_CF+36(%esp) /* closure is user_data */ + jmp L(do_closure_STDCALL) +L(UW23): + # cfi_endproc +ENDF(C(ffi_go_closure_STDCALL)) + +/* For REGISTER, we have no available parameter registers, and so we + enter here having pushed the closure onto the stack. */ + + .balign 16 + .globl C(ffi_closure_REGISTER) + FFI_HIDDEN(C(ffi_closure_REGISTER)) +C(ffi_closure_REGISTER): +L(UW24): + # cfi_startproc + # cfi_def_cfa(%esp, 8) + # cfi_offset(%eip, -8) + subl $closure_FS-4, %esp +L(UW25): + # cfi_def_cfa_offset(closure_FS + 4) + FFI_CLOSURE_SAVE_REGS + movl closure_FS-4(%esp), %ecx /* load retaddr */ + movl closure_FS(%esp), %eax /* load closure */ + movl %ecx, closure_FS(%esp) /* move retaddr */ + jmp L(do_closure_REGISTER) +L(UW26): + # cfi_endproc +ENDF(C(ffi_closure_REGISTER)) + +/* For STDCALL (and others), we need to pop N bytes of arguments off + the stack following the closure. The amount needing to be popped + is returned to us from ffi_closure_inner. */ + + .balign 16 + .globl C(ffi_closure_STDCALL) + FFI_HIDDEN(C(ffi_closure_STDCALL)) +C(ffi_closure_STDCALL): +L(UW27): + # cfi_startproc + subl $closure_FS, %esp +L(UW28): + # cfi_def_cfa_offset(closure_FS + 4) + + FFI_CLOSURE_SAVE_REGS + + /* Entry point from ffi_closure_REGISTER. */ +L(do_closure_REGISTER): + + FFI_CLOSURE_COPY_TRAMP_DATA + + /* Entry point from preceeding Go closure. 
*/ +L(do_closure_STDCALL): + + FFI_CLOSURE_PREP_CALL + FFI_CLOSURE_CALL_INNER(29) + + movl %eax, %ecx + shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */ + leal closure_FS(%esp, %ecx), %ecx /* compute popped esp */ + movl closure_FS(%esp), %edx /* move return address */ + movl %edx, (%ecx) + + /* From this point on, the value of %esp upon return is %ecx+4, + and we've copied the return address to %ecx to make return easy. + There's no point in representing this in the unwind info, as + there is always a window between the mov and the ret which + will be wrong from one point of view or another. */ + + FFI_CLOSURE_MASK_AND_JUMP(3, 30) + + .balign 8 +L(load_table3): +E(L(load_table3), X86_RET_FLOAT) + flds closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_DOUBLE) + fldl closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_LDOUBLE) + fldt closure_CF(%esp) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT8) + movsbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_SINT16) + movswl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT8) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_UINT16) + movzwl %ax, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT64) + movl closure_CF+4(%esp), %edx + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_INT32) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_VOID) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTPOP) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCTARG) + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_1B) + movzbl %al, %eax + movl %ecx, %esp + ret +E(L(load_table3), X86_RET_STRUCT_2B) + movzwl %ax, %eax + movl %ecx, %esp + ret + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table3), X86_RET_UNUSED14) + ud2 +E(L(load_table3), X86_RET_UNUSED15) + ud2 + +L(UW31): + # cfi_endproc +ENDF(C(ffi_closure_STDCALL)) + +#if !FFI_NO_RAW_API + +#define raw_closure_S_FS (16+16+12) + + .balign 16 + .globl C(ffi_closure_raw_SYSV) + FFI_HIDDEN(C(ffi_closure_raw_SYSV)) +C(ffi_closure_raw_SYSV): +L(UW32): + # cfi_startproc + subl $raw_closure_S_FS, %esp +L(UW33): + # cfi_def_cfa_offset(raw_closure_S_FS + 4) + movl %ebx, raw_closure_S_FS-4(%esp) +L(UW34): + # cfi_rel_offset(%ebx, raw_closure_S_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_S_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc4): + leal L(load_table4)-L(pc4)(%ebx, %eax, 8), %ecx +#else + leal L(load_table4)(,%eax, 8), %ecx +#endif + movl raw_closure_S_FS-4(%esp), %ebx +L(UW35): + # cfi_restore(%ebx) + movl 16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table4): +E(L(load_table4), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e4) +E(L(load_table4), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e4) +E(L(load_table4), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e4) +E(L(load_table4), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table4), X86_RET_VOID) +L(e4): + addl $raw_closure_S_FS, %esp +L(UW36): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret 
+L(UW37): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTPOP) + addl $raw_closure_S_FS, %esp +L(UW38): + # cfi_adjust_cfa_offset(-raw_closure_S_FS) + ret $4 +L(UW39): + # cfi_adjust_cfa_offset(raw_closure_S_FS) +E(L(load_table4), X86_RET_STRUCTARG) + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e4) +E(L(load_table4), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e4) + + /* Fill out the table so that bad values are predictable. */ +E(L(load_table4), X86_RET_UNUSED14) + ud2 +E(L(load_table4), X86_RET_UNUSED15) + ud2 + +L(UW40): + # cfi_endproc +ENDF(C(ffi_closure_raw_SYSV)) + +#define raw_closure_T_FS (16+16+8) + + .balign 16 + .globl C(ffi_closure_raw_THISCALL) + FFI_HIDDEN(C(ffi_closure_raw_THISCALL)) +C(ffi_closure_raw_THISCALL): +L(UW41): + # cfi_startproc + /* Rearrange the stack such that %ecx is the first argument. + This means moving the return address. */ + popl %edx +L(UW42): + # cfi_def_cfa_offset(0) + # cfi_register(%eip, %edx) + pushl %ecx +L(UW43): + # cfi_adjust_cfa_offset(4) + pushl %edx +L(UW44): + # cfi_adjust_cfa_offset(4) + # cfi_rel_offset(%eip, 0) + subl $raw_closure_T_FS, %esp +L(UW45): + # cfi_adjust_cfa_offset(raw_closure_T_FS) + movl %ebx, raw_closure_T_FS-4(%esp) +L(UW46): + # cfi_rel_offset(%ebx, raw_closure_T_FS-4) + + movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */ + movl %edx, 12(%esp) + leal raw_closure_T_FS+4(%esp), %edx /* load raw_args */ + movl %edx, 8(%esp) + leal 16(%esp), %edx /* load &res */ + movl %edx, 4(%esp) + movl FFI_TRAMPOLINE_SIZE(%eax), %ebx /* load cl->cif */ + movl %ebx, (%esp) + call *FFI_TRAMPOLINE_SIZE+4(%eax) /* call cl->fun */ + + movl 20(%ebx), %eax /* load cif->flags */ + andl $X86_RET_TYPE_MASK, %eax +#ifdef __PIC__ + call C(__x86.get_pc_thunk.bx) +L(pc5): + leal L(load_table5)-L(pc5)(%ebx, %eax, 8), %ecx +#else + leal L(load_table5)(,%eax, 8), %ecx +#endif + movl raw_closure_T_FS-4(%esp), %ebx +L(UW47): + # cfi_restore(%ebx) + movl 
16(%esp), %eax /* Optimistic load */ + jmp *%ecx + + .balign 8 +L(load_table5): +E(L(load_table5), X86_RET_FLOAT) + flds 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_DOUBLE) + fldl 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_LDOUBLE) + fldt 16(%esp) + jmp L(e5) +E(L(load_table5), X86_RET_SINT8) + movsbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_SINT16) + movswl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT8) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_UINT16) + movzwl %ax, %eax + jmp L(e5) +E(L(load_table5), X86_RET_INT64) + movl 16+4(%esp), %edx + jmp L(e5) +E(L(load_table5), X86_RET_INT32) + nop + /* fallthru */ +E(L(load_table5), X86_RET_VOID) +L(e5): + addl $raw_closure_T_FS, %esp +L(UW48): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + /* Remove the extra %ecx argument we pushed. */ + ret $4 +L(UW49): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTPOP) + addl $raw_closure_T_FS, %esp +L(UW50): + # cfi_adjust_cfa_offset(-raw_closure_T_FS) + ret $8 +L(UW51): + # cfi_adjust_cfa_offset(raw_closure_T_FS) +E(L(load_table5), X86_RET_STRUCTARG) + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_1B) + movzbl %al, %eax + jmp L(e5) +E(L(load_table5), X86_RET_STRUCT_2B) + movzwl %ax, %eax + jmp L(e5) + + /* Fill out the table so that bad values are predictable. 
*/ +E(L(load_table5), X86_RET_UNUSED14) + ud2 +E(L(load_table5), X86_RET_UNUSED15) + ud2 + +L(UW52): + # cfi_endproc +ENDF(C(ffi_closure_raw_THISCALL)) + +#endif /* !FFI_NO_RAW_API */ + +#ifdef X86_DARWIN +# define COMDAT(X) \ + .section __TEXT,__text,coalesced,pure_instructions; \ + .weak_definition X; \ + FFI_HIDDEN(X) +#elif defined __ELF__ && !(defined(__sun__) && defined(__svr4__)) +# define COMDAT(X) \ + .section .text.X,"axG",@progbits,X,comdat; \ + .globl X; \ + FFI_HIDDEN(X) +#else +# define COMDAT(X) +#endif + +#if defined(__PIC__) + COMDAT(C(__x86.get_pc_thunk.bx)) +C(__x86.get_pc_thunk.bx): + movl (%esp), %ebx + ret +ENDF(C(__x86.get_pc_thunk.bx)) +# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE + COMDAT(C(__x86.get_pc_thunk.dx)) +C(__x86.get_pc_thunk.dx): + movl (%esp), %edx + ret +ENDF(C(__x86.get_pc_thunk.dx)) +#endif /* DARWIN || HIDDEN */ +#endif /* __PIC__ */ + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(X86_WIN32) +.section .eh_frame,"r" +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,EH_FRAME_FLAGS,@unwind +#else +.section .eh_frame,EH_FRAME_FLAGS,@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. 
*/ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 4 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x7c /* CIE Data Alignment Factor */ + .byte 0x8 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */ + .byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */ + .balign 4 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW5)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */ + .byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */ + ADV(UW2, UW1) + .byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */ + ADV(UW3, UW2) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */ + .byte 0xc0+3 /* DW_CFA_restore, %ebx */ + .byte 0xc0+5 /* DW_CFA_restore, %ebp */ + ADV(UW4, UW3) + .byte 0xb /* DW_CFA_restore_state */ + .balign 4 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW6)) /* Initial location */ + .long L(UW8)-L(UW6) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW7, UW6) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW9)) /* Initial location */ + .long L(UW11)-L(UW9) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW10, UW9) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE3): + + .set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE 
Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW20)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW14, UW13) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW15, UW14) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW16, UW15) +#else + ADV(UW16, UW13) +#endif + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW17, UW16) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW18, UW17) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW19, UW18) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW21)) /* Initial location */ + .long L(UW23)-L(UW21) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW22, UW21) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE5): + + .set L(set6),L(EFDE6)-L(SFDE6) + .long L(set6) /* FDE Length */ +L(SFDE6): + .long L(SFDE6)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW24)) /* Initial location */ + .long L(UW26)-L(UW24) /* Address range */ + .byte 0 /* Augmentation size */ + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */ + ADV(UW25, UW24) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE6): + + .set L(set7),L(EFDE7)-L(SFDE7) + .long L(set7) /* FDE Length */ +L(SFDE7): + .long L(SFDE7)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW27)) /* Initial location */ + .long L(UW31)-L(UW27) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW28, UW27) + .byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */ +#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX + ADV(UW29, UW28) + .byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */ + ADV(UW30, 
UW29) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ +#endif + .balign 4 +L(EFDE7): + +#if !FFI_NO_RAW_API + .set L(set8),L(EFDE8)-L(SFDE8) + .long L(set8) /* FDE Length */ +L(SFDE8): + .long L(SFDE8)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW32)) /* Initial location */ + .long L(UW40)-L(UW32) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW33, UW32) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW34, UW33) + .byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */ + ADV(UW35, UW34) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW36, UW35) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW37, UW36) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + ADV(UW38, UW37) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW39, UW38) + .byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE8): + + .set L(set9),L(EFDE9)-L(SFDE9) + .long L(set9) /* FDE Length */ +L(SFDE9): + .long L(SFDE9)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW41)) /* Initial location */ + .long L(UW52)-L(UW41) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW42, UW41) + .byte 0xe, 0 /* DW_CFA_def_cfa_offset */ + .byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */ + ADV(UW43, UW42) + .byte 0xe, 4 /* DW_CFA_def_cfa_offset */ + ADV(UW44, UW43) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + .byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */ + ADV(UW45, UW44) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW46, UW45) + .byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */ + ADV(UW47, UW46) + .byte 0xc0+3 /* DW_CFA_restore %ebx */ + ADV(UW48, UW47) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW49, UW48) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + ADV(UW50, UW49) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset */ + ADV(UW51, UW50) + .byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */ + .balign 4 +L(EFDE9): +#endif /* !FFI_NO_RAW_API */ + +#ifdef _WIN32 + .def @feat.00; + .scl 3; + .type 0; + .endef + .globl @feat.00 
+@feat.00 = 1 +#endif + +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_i386 */ + .long C(ffi_call_i386) + .set L1,L(UW5)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_EAX */ + .long C(ffi_go_closure_EAX) + .set L2,L(UW8)-L(UW6) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_ECX */ + .long C(ffi_go_closure_ECX) + .set L3,L(UW11)-L(UW9) + .long L3 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_i386 */ + .long C(ffi_closure_i386) + .set L4,L(UW20)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_go_closure_STDCALL */ + .long C(ffi_go_closure_STDCALL) + .set L5,L(UW23)-L(UW21) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_REGISTER */ + .long C(ffi_closure_REGISTER) + .set L6,L(UW26)-L(UW24) + .long L6 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_STDCALL */ + .long C(ffi_closure_STDCALL) + .set L7,L(UW31)-L(UW27) + .long L7 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_raw_SYSV */ + .long C(ffi_closure_raw_SYSV) + .set L8,L(UW40)-L(UW32) + .long L8 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 + + /* compact unwind for ffi_closure_raw_THISCALL */ + .long C(ffi_closure_raw_THISCALL) + .set L9,L(UW52)-L(UW41) + .long L9 + .long 0x04000000 /* use dwarf unwind info */ + .long 0 + .long 0 +#endif /* __APPLE__ */ + +#endif /* ifndef _MSC_VER */ +#endif /* ifndef __x86_64__ */ + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + +#endif \ No newline at end of file diff --git 
a/module/src/main/cpp/whale/src/libffi/x86/unix64_x86_64.S b/module/src/main/cpp/whale/src/libffi/x86/unix64_x86_64.S new file mode 100644 index 00000000..56469f3b --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/unix64_x86_64.S @@ -0,0 +1,571 @@ +#ifdef __x86_64__ + +/* ----------------------------------------------------------------------- + unix64.S - Copyright (c) 2013 The Written Word, Inc. + - Copyright (c) 2008 Red Hat, Inc + - Copyright (c) 2002 Bo Thorsen + + x86-64 Foreign Function Interface + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + ``Software''), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + ----------------------------------------------------------------------- */ + +#ifdef __x86_64__ +#define LIBFFI_ASM +#include +#include +#include "internal64.h" +#include "asmnames.h" + + .text + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? 
The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + +/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags, + void *raddr, void (*fnaddr)(void)); + + Bit o trickiness here -- ARGS+BYTES is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .balign 8 + .globl C(ffi_call_unix64) + FFI_HIDDEN(C(ffi_call_unix64)) + +C(ffi_call_unix64): +L(UW0): + movq (%rsp), %r10 /* Load return address. */ + leaq (%rdi, %rsi), %rax /* Find local stack base. */ + movq %rdx, (%rax) /* Save flags. */ + movq %rcx, 8(%rax) /* Save raddr. */ + movq %rbp, 16(%rax) /* Save old frame pointer. */ + movq %r10, 24(%rax) /* Relocate return address. */ + movq %rax, %rbp /* Finalize local stack frame. */ + + /* New stack frame based off rbp. This is a itty bit of unwind + trickery in that the CFA *has* changed. There is no easy way + to describe it correctly on entry to the function. Fortunately, + it doesn't matter too much since at all points we can correctly + unwind back to ffi_call. Note that the location to which we + moved the return address is (the new) CFA-8, so from the + perspective of the unwind info, it hasn't moved. */ +L(UW1): + /* cfi_def_cfa(%rbp, 32) */ + /* cfi_rel_offset(%rbp, 16) */ + + movq %rdi, %r10 /* Save a copy of the register area. */ + movq %r8, %r11 /* Save a copy of the target fn. */ + movl %r9d, %eax /* Set number of SSE registers. */ + + /* Load up all argument registers. 
*/ + movq (%r10), %rdi + movq 0x08(%r10), %rsi + movq 0x10(%r10), %rdx + movq 0x18(%r10), %rcx + movq 0x20(%r10), %r8 + movq 0x28(%r10), %r9 + movl 0xb0(%r10), %eax + testl %eax, %eax + jnz L(load_sse) +L(ret_from_load_sse): + + /* Deallocate the reg arg area, except for r10, then load via pop. */ + leaq 0xb8(%r10), %rsp + popq %r10 + + /* Call the user function. */ + call *%r11 + + /* Deallocate stack arg area; local stack frame in redzone. */ + leaq 24(%rbp), %rsp + + movq 0(%rbp), %rcx /* Reload flags. */ + movq 8(%rbp), %rdi /* Reload raddr. */ + movq 16(%rbp), %rbp /* Reload old frame pointer. */ +L(UW2): + /* cfi_remember_state */ + /* cfi_def_cfa(%rsp, 8) */ + /* cfi_restore(%rbp) */ + + /* The first byte of the flags contains the FFI_TYPE. */ + cmpb $UNIX64_RET_LAST, %cl + movzbl %cl, %r10d + leaq L(store_table)(%rip), %r11 + ja L(sa) + leaq (%r11, %r10, 8), %r10 + + /* Prep for the structure cases: scratch area in redzone. */ + leaq -20(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(store_table): +E(L(store_table), UNIX64_RET_VOID) + ret +E(L(store_table), UNIX64_RET_UINT8) + movzbl %al, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT16) + movzwl %ax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_UINT32) + movl %eax, %eax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT8) + movsbq %al, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT16) + movswq %ax, %rax + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_SINT32) + cltq + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_INT64) + movq %rax, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM32) + movd %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_XMM64) + movq %xmm0, (%rdi) + ret +E(L(store_table), UNIX64_RET_X87) + fstpt (%rdi) + ret +E(L(store_table), UNIX64_RET_X87_2) + fstpt (%rdi) + fstpt 16(%rdi) + ret +E(L(store_table), UNIX64_RET_ST_XMM0_RAX) + movq %rax, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_XMM0) + movq %xmm0, 
8(%rsi) + jmp L(s2) +E(L(store_table), UNIX64_RET_ST_XMM0_XMM1) + movq %xmm1, 8(%rsi) + jmp L(s3) +E(L(store_table), UNIX64_RET_ST_RAX_RDX) + movq %rdx, 8(%rsi) +L(s2): + movq %rax, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + .balign 8 +L(s3): + movq %xmm0, (%rsi) + shrl $UNIX64_SIZE_SHIFT, %ecx + rep movsb + ret + +L(sa): call PLT(C(abort)) + + /* Many times we can avoid loading any SSE registers at all. + It's not worth an indirect jump to load the exact set of + SSE registers needed; zero or all is a good compromise. */ + .balign 2 +L(UW3): + /* cfi_restore_state */ +L(load_sse): + movdqa 0x30(%r10), %xmm0 + movdqa 0x40(%r10), %xmm1 + movdqa 0x50(%r10), %xmm2 + movdqa 0x60(%r10), %xmm3 + movdqa 0x70(%r10), %xmm4 + movdqa 0x80(%r10), %xmm5 + movdqa 0x90(%r10), %xmm6 + movdqa 0xa0(%r10), %xmm7 + jmp L(ret_from_load_sse) + +L(UW4): +ENDF(C(ffi_call_unix64)) + +/* 6 general registers, 8 vector registers, + 32 bytes of rvalue, 8 bytes of alignment. */ +#define ffi_closure_OFS_G 0 +#define ffi_closure_OFS_V (6*8) +#define ffi_closure_OFS_RVALUE (ffi_closure_OFS_V + 8*16) +#define ffi_closure_FS (ffi_closure_OFS_RVALUE + 32 + 8) + +/* The location of rvalue within the red zone after deallocating the frame. 
*/ +#define ffi_closure_RED_RVALUE (ffi_closure_OFS_RVALUE - ffi_closure_FS) + + .balign 2 + .globl C(ffi_closure_unix64_sse) + FFI_HIDDEN(C(ffi_closure_unix64_sse)) + +C(ffi_closure_unix64_sse): +L(UW5): + subq $ffi_closure_FS, %rsp +L(UW6): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry1) + +L(UW7): +ENDF(C(ffi_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_closure_unix64) + FFI_HIDDEN(C(ffi_closure_unix64)) + +C(ffi_closure_unix64): +L(UW8): + subq $ffi_closure_FS, %rsp +L(UW9): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry1): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */ + movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */ + movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */ +#else + movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */ +#endif +L(do_closure): + leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */ + movq %rsp, %r8 /* Load reg_args */ + leaq ffi_closure_FS+8(%rsp), %r9 /* Load argp */ + call PLT(C(ffi_closure_unix64_inner)) + + /* Deallocate stack frame early; return value is now in redzone. */ + addq $ffi_closure_FS, %rsp +L(UW10): + /* cfi_adjust_cfa_offset(-ffi_closure_FS) */ + + /* The first byte of the return value contains the FFI_TYPE. 
*/ + cmpb $UNIX64_RET_LAST, %al + movzbl %al, %r10d + leaq L(load_table)(%rip), %r11 + ja L(la) + leaq (%r11, %r10, 8), %r10 + leaq ffi_closure_RED_RVALUE(%rsp), %rsi + jmp *%r10 + + .balign 8 +L(load_table): +E(L(load_table), UNIX64_RET_VOID) + ret +E(L(load_table), UNIX64_RET_UINT8) + movzbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT16) + movzwl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_UINT32) + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT8) + movsbl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT16) + movswl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_SINT32) + movl (%rsi), %eax + ret +E(L(load_table), UNIX64_RET_INT64) + movq (%rsi), %rax + ret +E(L(load_table), UNIX64_RET_XMM32) + movd (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_XMM64) + movq (%rsi), %xmm0 + ret +E(L(load_table), UNIX64_RET_X87) + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_X87_2) + fldt 16(%rsi) + fldt (%rsi) + ret +E(L(load_table), UNIX64_RET_ST_XMM0_RAX) + movq 8(%rsi), %rax + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_XMM0) + movq 8(%rsi), %xmm0 + jmp L(l2) +E(L(load_table), UNIX64_RET_ST_XMM0_XMM1) + movq 8(%rsi), %xmm1 + jmp L(l3) +E(L(load_table), UNIX64_RET_ST_RAX_RDX) + movq 8(%rsi), %rdx +L(l2): + movq (%rsi), %rax + ret + .balign 8 +L(l3): + movq (%rsi), %xmm0 + ret + +L(la): call PLT(C(abort)) + +L(UW11): +ENDF(C(ffi_closure_unix64)) + + .balign 2 + .globl C(ffi_go_closure_unix64_sse) + FFI_HIDDEN(C(ffi_go_closure_unix64_sse)) + +C(ffi_go_closure_unix64_sse): +L(UW12): + subq $ffi_closure_FS, %rsp +L(UW13): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ + + movdqa %xmm0, ffi_closure_OFS_V+0x00(%rsp) + movdqa %xmm1, ffi_closure_OFS_V+0x10(%rsp) + movdqa %xmm2, ffi_closure_OFS_V+0x20(%rsp) + movdqa %xmm3, ffi_closure_OFS_V+0x30(%rsp) + movdqa %xmm4, ffi_closure_OFS_V+0x40(%rsp) + movdqa %xmm5, ffi_closure_OFS_V+0x50(%rsp) + movdqa %xmm6, ffi_closure_OFS_V+0x60(%rsp) + movdqa %xmm7, ffi_closure_OFS_V+0x70(%rsp) + jmp L(sse_entry2) + 
+L(UW14): +ENDF(C(ffi_go_closure_unix64_sse)) + + .balign 2 + .globl C(ffi_go_closure_unix64) + FFI_HIDDEN(C(ffi_go_closure_unix64)) + +C(ffi_go_closure_unix64): +L(UW15): + subq $ffi_closure_FS, %rsp +L(UW16): + /* cfi_adjust_cfa_offset(ffi_closure_FS) */ +L(sse_entry2): + movq %rdi, ffi_closure_OFS_G+0x00(%rsp) + movq %rsi, ffi_closure_OFS_G+0x08(%rsp) + movq %rdx, ffi_closure_OFS_G+0x10(%rsp) + movq %rcx, ffi_closure_OFS_G+0x18(%rsp) + movq %r8, ffi_closure_OFS_G+0x20(%rsp) + movq %r9, ffi_closure_OFS_G+0x28(%rsp) + +#ifdef __ILP32__ + movl 4(%r10), %edi /* Load cif */ + movl 8(%r10), %esi /* Load fun */ + movl %r10d, %edx /* Load closure (user_data) */ +#else + movq 8(%r10), %rdi /* Load cif */ + movq 16(%r10), %rsi /* Load fun */ + movq %r10, %rdx /* Load closure (user_data) */ +#endif + jmp L(do_closure) + +L(UW17): +ENDF(C(ffi_go_closure_unix64)) + +/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */ + +#ifdef __APPLE__ +.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support +EHFrame0: +#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE) +.section .eh_frame,"a",@unwind +#else +.section .eh_frame,"a",@progbits +#endif + +#ifdef HAVE_AS_X86_PCREL +# define PCREL(X) X - . +#else +# define PCREL(X) X@rel +#endif + +/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. 
*/ +#define ADV(N, P) .byte 2, L(N)-L(P) + + .balign 8 +L(CIE): + .set L(set0),L(ECIE)-L(SCIE) + .long L(set0) /* CIE Length */ +L(SCIE): + .long 0 /* CIE Identifier Tag */ + .byte 1 /* CIE Version */ + .ascii "zR\0" /* CIE Augmentation */ + .byte 1 /* CIE Code Alignment Factor */ + .byte 0x78 /* CIE Data Alignment Factor */ + .byte 0x10 /* CIE RA Column */ + .byte 1 /* Augmentation size */ + .byte 0x1b /* FDE Encoding (pcrel sdata4) */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp offset 8 */ + .byte 0x80+16, 1 /* DW_CFA_offset, %rip offset 1*-8 */ + .balign 8 +L(ECIE): + + .set L(set1),L(EFDE1)-L(SFDE1) + .long L(set1) /* FDE Length */ +L(SFDE1): + .long L(SFDE1)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW0)) /* Initial location */ + .long L(UW4)-L(UW0) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW1, UW0) + .byte 0xc, 6, 32 /* DW_CFA_def_cfa, %rbp 32 */ + .byte 0x80+6, 2 /* DW_CFA_offset, %rbp 2*-8 */ + ADV(UW2, UW1) + .byte 0xa /* DW_CFA_remember_state */ + .byte 0xc, 7, 8 /* DW_CFA_def_cfa, %rsp 8 */ + .byte 0xc0+6 /* DW_CFA_restore, %rbp */ + ADV(UW3, UW2) + .byte 0xb /* DW_CFA_restore_state */ + .balign 8 +L(EFDE1): + + .set L(set2),L(EFDE2)-L(SFDE2) + .long L(set2) /* FDE Length */ +L(SFDE2): + .long L(SFDE2)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW5)) /* Initial location */ + .long L(UW7)-L(UW5) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW6, UW5) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE2): + + .set L(set3),L(EFDE3)-L(SFDE3) + .long L(set3) /* FDE Length */ +L(SFDE3): + .long L(SFDE3)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW8)) /* Initial location */ + .long L(UW11)-L(UW8) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW9, UW8) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + ADV(UW10, UW9) + .byte 0xe, 8 /* DW_CFA_def_cfa_offset 8 */ +L(EFDE3): + + 
.set L(set4),L(EFDE4)-L(SFDE4) + .long L(set4) /* FDE Length */ +L(SFDE4): + .long L(SFDE4)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW12)) /* Initial location */ + .long L(UW14)-L(UW12) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW13, UW12) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE4): + + .set L(set5),L(EFDE5)-L(SFDE5) + .long L(set5) /* FDE Length */ +L(SFDE5): + .long L(SFDE5)-L(CIE) /* FDE CIE offset */ + .long PCREL(L(UW15)) /* Initial location */ + .long L(UW17)-L(UW15) /* Address range */ + .byte 0 /* Augmentation size */ + ADV(UW16, UW15) + .byte 0xe /* DW_CFA_def_cfa_offset */ + .byte ffi_closure_FS + 8, 1 /* uleb128, assuming 128 <= FS < 255 */ + .balign 8 +L(EFDE5): +#ifdef __APPLE__ + .subsections_via_symbols + .section __LD,__compact_unwind,regular,debug + + /* compact unwind for ffi_call_unix64 */ + .quad C(ffi_call_unix64) + .set L1,L(UW4)-L(UW0) + .long L1 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64_sse */ + .quad C(ffi_closure_unix64_sse) + .set L2,L(UW7)-L(UW5) + .long L2 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_closure_unix64 */ + .quad C(ffi_closure_unix64) + .set L3,L(UW11)-L(UW8) + .long L3 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_go_closure_unix64_sse */ + .quad C(ffi_go_closure_unix64_sse) + .set L4,L(UW14)-L(UW12) + .long L4 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 + + /* compact unwind for ffi_go_closure_unix64 */ + .quad C(ffi_go_closure_unix64) + .set L5,L(UW17)-L(UW15) + .long L5 + .long 0x04000000 /* use dwarf unwind info */ + .quad 0 + .quad 0 +#endif + +#endif /* __x86_64__ */ +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + +#endif \ No newline at end of file diff --git 
a/module/src/main/cpp/whale/src/libffi/x86/win64_x86_64.S b/module/src/main/cpp/whale/src/libffi/x86/win64_x86_64.S new file mode 100644 index 00000000..f14304c1 --- /dev/null +++ b/module/src/main/cpp/whale/src/libffi/x86/win64_x86_64.S @@ -0,0 +1,237 @@ +#ifdef __x86_64__ + +#define LIBFFI_ASM +#include +#include +#include +#include "asmnames.h" + +#if defined(HAVE_AS_CFI_PSEUDO_OP) + .cfi_sections .debug_frame +#endif + +#ifdef X86_WIN64 +#define SEH(...) __VA_ARGS__ +#define arg0 %rcx +#define arg1 %rdx +#define arg2 %r8 +#define arg3 %r9 +#else +#define SEH(...) +#define arg0 %rdi +#define arg1 %rsi +#define arg2 %rdx +#define arg3 %rcx +#endif + +/* This macro allows the safe creation of jump tables without an + actual table. The entry points into the table are all 8 bytes. + The use of ORG asserts that we're at the correct location. */ +/* ??? The clang assembler doesn't handle .org with symbolic expressions. */ +#if defined(__clang__) || defined(__APPLE__) || (defined (__sun__) && defined(__svr4__)) +# define E(BASE, X) .balign 8 +#else +# define E(BASE, X) .balign 8; .org BASE + X * 8 +#endif + + .text + +/* ffi_call_win64 (void *stack, struct win64_call_frame *frame, void *r10) + + Bit o trickiness here -- FRAME is the base of the stack frame + for this function. This has been allocated by ffi_call. We also + deallocate some of the stack that has been alloca'd. */ + + .align 8 + .globl C(ffi_call_win64) + + SEH(.seh_proc ffi_call_win64) +C(ffi_call_win64): + cfi_startproc + /* Set up the local stack frame and install it in rbp/rsp. */ + movq (%rsp), %rax + movq %rbp, (arg1) + movq %rax, 8(arg1) + movq arg1, %rbp + cfi_def_cfa(%rbp, 16) + cfi_rel_offset(%rbp, 0) + SEH(.seh_pushreg %rbp) + SEH(.seh_setframe %rbp, 0) + SEH(.seh_endprologue) + movq arg0, %rsp + + movq arg2, %r10 + + /* Load all slots into both general and xmm registers. 
*/ + movq (%rsp), %rcx + movsd (%rsp), %xmm0 + movq 8(%rsp), %rdx + movsd 8(%rsp), %xmm1 + movq 16(%rsp), %r8 + movsd 16(%rsp), %xmm2 + movq 24(%rsp), %r9 + movsd 24(%rsp), %xmm3 + + call *16(%rbp) + + movl 24(%rbp), %ecx + movq 32(%rbp), %r8 + leaq 0f(%rip), %r10 + cmpl $FFI_TYPE_SMALL_STRUCT_4B, %ecx + leaq (%r10, %rcx, 8), %r10 + ja 99f + jmp *%r10 + +/* Below, we're space constrained most of the time. Thus we eschew the + modern "mov, pop, ret" sequence (5 bytes) for "leave, ret" (2 bytes). */ +.macro epilogue + leaveq + cfi_remember_state + cfi_def_cfa(%rsp, 8) + cfi_restore(%rbp) + ret + cfi_restore_state +.endm + + .align 8 +0: +E(0b, FFI_TYPE_VOID) + epilogue +E(0b, FFI_TYPE_INT) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_FLOAT) + movss %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_DOUBLE) + movsd %xmm0, (%r8) + epilogue +E(0b, FFI_TYPE_LONGDOUBLE) + call PLT(C(abort)) +E(0b, FFI_TYPE_UINT8) + movzbl %al, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT8) + movsbq %al, %rax + jmp 98f +E(0b, FFI_TYPE_UINT16) + movzwl %ax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT16) + movswq %ax, %rax + jmp 98f +E(0b, FFI_TYPE_UINT32) + movl %eax, %eax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT32) + movslq %eax, %rax + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_UINT64) +98: movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_SINT64) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_STRUCT) + epilogue +E(0b, FFI_TYPE_POINTER) + movq %rax, (%r8) + epilogue +E(0b, FFI_TYPE_COMPLEX) + call PLT(C(abort)) +E(0b, FFI_TYPE_SMALL_STRUCT_1B) + movb %al, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_2B) + movw %ax, (%r8) + epilogue +E(0b, FFI_TYPE_SMALL_STRUCT_4B) + movl %eax, (%r8) + epilogue + + .align 8 +99: call PLT(C(abort)) + + epilogue + + cfi_endproc + SEH(.seh_endproc) + + +/* 32 bytes of outgoing register stack space, 8 bytes of alignment, + 16 bytes of result, 32 bytes of xmm registers. 
*/ +#define ffi_clo_FS (32+8+16+32) +#define ffi_clo_OFF_R (32+8) +#define ffi_clo_OFF_X (32+8+16) + + .align 8 + .globl C(ffi_go_closure_win64) + + SEH(.seh_proc ffi_go_closure_win64) +C(ffi_go_closure_win64): + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq 8(%r10), %rcx /* load cif */ + movq 16(%r10), %rdx /* load fun */ + movq %r10, %r8 /* closure is user_data */ + jmp 0f + cfi_endproc + SEH(.seh_endproc) + + .align 8 + .globl C(ffi_closure_win64) + + SEH(.seh_proc ffi_closure_win64) +C(ffi_closure_win64): + cfi_startproc + /* Save all integer arguments into the incoming reg stack space. */ + movq %rcx, 8(%rsp) + movq %rdx, 16(%rsp) + movq %r8, 24(%rsp) + movq %r9, 32(%rsp) + + movq FFI_TRAMPOLINE_SIZE(%r10), %rcx /* load cif */ + movq FFI_TRAMPOLINE_SIZE+8(%r10), %rdx /* load fun */ + movq FFI_TRAMPOLINE_SIZE+16(%r10), %r8 /* load user_data */ +0: + subq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(ffi_clo_FS) + SEH(.seh_stackalloc ffi_clo_FS) + SEH(.seh_endprologue) + + /* Save all sse arguments into the stack frame. */ + movsd %xmm0, ffi_clo_OFF_X(%rsp) + movsd %xmm1, ffi_clo_OFF_X+8(%rsp) + movsd %xmm2, ffi_clo_OFF_X+16(%rsp) + movsd %xmm3, ffi_clo_OFF_X+24(%rsp) + + leaq ffi_clo_OFF_R(%rsp), %r9 + call PLT(C(ffi_closure_win64_inner)) + + /* Load the result into both possible result registers. 
*/ + movq ffi_clo_OFF_R(%rsp), %rax + movsd ffi_clo_OFF_R(%rsp), %xmm0 + + addq $ffi_clo_FS, %rsp + cfi_adjust_cfa_offset(-ffi_clo_FS) + ret + + cfi_endproc + SEH(.seh_endproc) + +#if defined __ELF__ && defined __linux__ + .section .note.GNU-stack,"",@progbits +#endif + + +#endif \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/platform/linux/elf_image.cc b/module/src/main/cpp/whale/src/platform/linux/elf_image.cc new file mode 100644 index 00000000..9a9f0eb7 --- /dev/null +++ b/module/src/main/cpp/whale/src/platform/linux/elf_image.cc @@ -0,0 +1,203 @@ +#include "platform/linux/elf_image.h" + +#define SHT_GNU_HASH 0x6ffffff6 + +namespace whale { + +static u4 ElfHash(const char *name) { + const u1 *name_bytes = reinterpret_cast(name); + u4 h = 0, g; + while (*name_bytes) { + h = (h << 4) + *name_bytes++; + g = h & 0xf0000000; + h ^= g; + h ^= g >> 24; + } + return h; +} + +static u4 GnuHash(const char *name) { + const u1 *name_bytes = reinterpret_cast(name); + u4 h = 5381; + while (*name_bytes) { + h += (h << 5) + *name_bytes++; + } + return h; +} + +Elf_Sym *ElfParser::LinearLookup(const char *name, Elf_Sym *symtab, + uintptr_t symcnt, const char *strtab) { + + while (symcnt > 0) { + --symcnt; + if ((symtab[symcnt].st_info & (STT_OBJECT | STT_FUNC)) != 0) { + if (strcmp(strtab + symtab[symcnt].st_name, name) == 0) { + return symtab + symcnt; + } + } + } + return nullptr; +} + +Elf_Sym *ElfParser::ElfLookup(const char *name) { + uint_fast32_t hash = ElfHash(name); + + for (uintptr_t n = bucket_[hash % nbucket_]; n != 0; n = chain_[n]) { + Elf_Sym *sym = dynsym_ + n; + if (strcmp(dynstr_ + sym->st_name, name) == 0) { + return sym; + } + } + return NULL; +} + +Elf_Sym *ElfParser::GnuLookup(const char *name) { + static constexpr uint_fast32_t bloom_mask_bits = sizeof(ElfW(Addr)) * 8; + + uint32_t hashval = GnuHash(name); + uint32_t hashval2 = hashval >> gnu_shift2_; + Elf_Addr bloom_word = gnu_bloom_filter_[(hashval / bloom_mask_bits) & + 
gnu_maskwords_bm_]; + + if ((1 & (bloom_word >> (hashval % bloom_mask_bits)) & + (bloom_word >> (hashval2 % bloom_mask_bits))) != 0) { + uint_fast32_t sym_index = gnu_bucket_[hashval % gnu_nbucket_]; + if (sym_index != 0) { + do { + Elf_Sym *sym = dynsym_ + sym_index; + if (((gnu_chain_[sym_index] ^ hashval) >> 1) == 0 + && strcmp(dynstr_ + sym->st_name, name) == 0) { + return sym; + } + } while ((gnu_chain_[sym_index++] & 1) == 0); + } + } + return nullptr; +} + +uintptr_t ElfParser::FindSymbolOffset(const char *name) { + Elf_Sym *sym = nullptr; + if (gnu_nbucket_ > 0) { + sym = GnuLookup(name); + } + if (sym == nullptr && nbucket_ > 0) { + sym = ElfLookup(name); + } + if (sym == nullptr && symtab_ != nullptr) { + sym = LinearLookup(name, symtab_, + symcnt_, strtab_); + } + if (sym != nullptr) { + return static_cast(sym->st_value + load_bias_); + } + return 0; +} + +bool ElfParser::Parse(uintptr_t base) { + load_bias_ = INT_MAX; + ehdr_ = reinterpret_cast(base); + phdr_ = OffsetOf(base, ehdr_->e_phoff); + shdr_ = OffsetOf(base, ehdr_->e_shoff); + + if (ehdr_->e_shnum <= 0) { + return false; + } + shstrtab_ = OffsetOf(ehdr_, shdr_[ehdr_->e_shstrndx].sh_offset); + dynstr_ = nullptr; + for (int i = 0; i < ehdr_->e_shnum; ++i) { + switch (shdr_[i].sh_type) { + case SHT_SYMTAB: + symtab_ = OffsetOf(ehdr_, shdr_[i].sh_offset); + symcnt_ = shdr_[i].sh_size / sizeof(Elf_Sym); + break; + case SHT_DYNSYM: + dynsym_ = OffsetOf(ehdr_, shdr_[i].sh_offset); + dyncnt_ = shdr_[i].sh_size / sizeof(Elf_Sym); + break; + case SHT_STRTAB: { + char *name = shstrtab_ + shdr_[i].sh_name; + char *table = OffsetOf(ehdr_, shdr_[i].sh_offset); + if (dynstr_ == nullptr) { + dynstr_ = table; + } else if (!strcmp(name, ".strtab")) { + strtab_ = table; + } + break; + } + case SHT_PROGBITS: { + if (load_bias_ == INT_MAX) { + load_bias_ = static_cast(shdr_[i].sh_offset - shdr_[i].sh_addr); + } else { + char *name = shstrtab_ + shdr_[i].sh_name; + if (!strcmp(name, ".got")) { + got_ = &shdr_[i]; + 
} else if (!strcmp(name, ".got.plt")) { + got_plt_ = &shdr_[i]; + } + } + break; + } + case SHT_HASH: { + Elf_Word *d_un = OffsetOf(ehdr_, shdr_[i].sh_offset); + nbucket_ = d_un[0]; + nchain_ = d_un[1]; + bucket_ = d_un + 2; + chain_ = bucket_ + nbucket_; + break; + } + case SHT_GNU_HASH: { + Elf_Word *d_buf = OffsetOf(ehdr_, shdr_[i].sh_offset); + gnu_nbucket_ = d_buf[0]; + gnu_symndx_ = d_buf[1]; + gnu_maskwords_bm_ = d_buf[2]; + if (ehdr_->e_ident[EI_CLASS] == ELFCLASS64) { + gnu_maskwords_bm_ *= 2; + } + gnu_shift2_ = d_buf[3]; + gnu_bloom_filter_ = d_buf + 4; + gnu_bucket_ = d_buf + 4 + gnu_maskwords_bm_; + gnu_chain_ = gnu_bucket_ + gnu_nbucket_ - + gnu_symndx_; + --gnu_maskwords_bm_; + break; + } + default: { + char *name = shstrtab_ + shdr_[i].sh_name; + if (!strcmp(name, ".rel.dyn")) { + rel_dyn_ = &shdr_[i]; + } else if (!strcmp(name, ".rel.plt")) { + rel_plt_ = &shdr_[i]; + } else if (!strcmp(name, ".rela.dyn")) { + rela_dyn_ = &shdr_[i]; + } else if (!strcmp(name, ".rela.plt")) { + rela_plt_ = &shdr_[i]; + } + break; + } + } // end switch + } + return true; +} + +bool ElfReader::Open(const char *path) { + // open will return -1 in houdini, so we use fopen. 
+ FILE *file = fopen(path, "rbe"); + if (file == nullptr) { + LOG(ERROR) << "failed to open: " << path << ", err: " << strerror(errno); + return false; + } + int fd = fileno(file); + struct stat stat; + if (fstat(fd, &stat) != 0) { + return false; + } + len_ = static_cast(stat.st_size); + base_ = mmap(nullptr, len_, PROT_READ, MAP_PRIVATE, fd, 0); + if (base_ == MAP_FAILED) { + return false; + } + TEMP_FAILURE_RETRY(read(fd, base_, len_)); + return true; +} + +} // namespace whale diff --git a/module/src/main/cpp/whale/src/platform/linux/elf_image.h b/module/src/main/cpp/whale/src/platform/linux/elf_image.h new file mode 100644 index 00000000..ab109fea --- /dev/null +++ b/module/src/main/cpp/whale/src/platform/linux/elf_image.h @@ -0,0 +1,157 @@ +#ifndef WHALE_PLATFORM_LINUX_ELF_READER_H_ +#define WHALE_PLATFORM_LINUX_ELF_READER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "base/cxx_helper.h" +#include "base/primitive_types.h" +#include "base/logging.h" + +#if defined(__LP64__) +#define Elf_Ehdr Elf64_Ehdr +#define Elf_Shdr Elf64_Shdr +#define Elf_Phdr Elf64_Phdr +#define Elf_Sym Elf64_Sym +#define Elf_Word Elf64_Word +#define Elf_Addr Elf64_Addr +#define ElfW(what) Elf64_ ## what +#define ELF_R_SYM ELF64_R_SYM +#define ELF_R_TYPE ELF64_R_TYPE +#else +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Shdr Elf32_Shdr +#define Elf_Phdr Elf32_Phdr +#define Elf_Sym Elf32_Sym +#define Elf_Word Elf32_Word +#define Elf_Addr Elf32_Addr +#define ElfW(what) Elf32_ ## what +#define ELF_R_SYM ELF32_R_SYM +#define ELF_R_TYPE ELF32_R_TYPE +#endif + +namespace whale { + +class ElfParser { + public: + ElfParser() {} + + intptr_t GetLoadBias() { + return load_bias_; + } + + Elf_Sym * + LinearLookup(const char *name, Elf_Sym *symtab, uintptr_t symcnt, const char *strtab); + + Elf_Sym *ElfLookup(const char *name); + + Elf_Sym *GnuLookup(const char *name); + + + uintptr_t FindSymbolOffset(const char *name); + + bool Parse(uintptr_t base); + + 
private: + Elf_Ehdr *ehdr_; + Elf_Phdr *phdr_; + Elf_Shdr *shdr_; + uintptr_t load_bias_; + + Elf_Shdr *got_; + Elf_Shdr *got_plt_; + Elf_Shdr *rel_dyn_; + Elf_Shdr *rel_plt_; + Elf_Shdr *rela_dyn_; + Elf_Shdr *rela_plt_; + + char *shstrtab_; + char *strtab_; + char *dynstr_; + Elf_Sym *symtab_; + uintptr_t symcnt_; + Elf_Sym *dynsym_; + uintptr_t dyncnt_; + + u4 nbucket_{}; + u4 nchain_{}; + u4 *bucket_{}; + u4 *chain_{}; + + u4 gnu_nbucket_{}; + u4 gnu_symndx_{}; + u4 gnu_maskwords_bm_; + u4 gnu_shift2_; + u4 *gnu_bloom_filter_; + u4 *gnu_bucket_; + u4 *gnu_chain_; +}; + +class ElfReader { + public: + ElfReader() : base_(nullptr), len_(0), fp_(nullptr) {} + + ~ElfReader() { + if (fp_) { + fclose(fp_); + } + if (base_ != nullptr && base_ != MAP_FAILED) { + munmap(base_, len_); + } + } + + bool Open(const char *path); + + bool ReadSectionHeaders() { + return parser_.Parse(reinterpret_cast(base_)); + } + + intptr_t GetLoadBias() { + return parser_.GetLoadBias(); + } + + uintptr_t FindSymbolOffset(const char *name) { + return parser_.FindSymbolOffset(name); + } + + private: + void *base_; + size_t len_; + FILE *fp_; + ElfParser parser_; +}; + + +class ElfImage { + public: + ElfImage() : base_(0) {} + + bool Open(const char *path, uintptr_t base) { + base_ = base; + return reader_.Open(path) && reader_.ReadSectionHeaders(); + } + + template + T FindSymbol(const char *name) { + uintptr_t offset = reader_.FindSymbolOffset(name); + if (offset > 0) { + uintptr_t ptr = base_ + offset; + return ForceCast(reinterpret_cast(ptr)); + } + return (T) nullptr; + } + + + private: + ElfReader reader_; + uintptr_t base_; +}; + +} // namespace whale + +#endif // WHALE_PLATFORM_LINUX_ELF_READER_H_ diff --git a/module/src/main/cpp/whale/src/platform/linux/process_map.cc b/module/src/main/cpp/whale/src/platform/linux/process_map.cc new file mode 100644 index 00000000..cad69b43 --- /dev/null +++ b/module/src/main/cpp/whale/src/platform/linux/process_map.cc @@ -0,0 +1,82 @@ +#include 
+#include +#include +#include "platform/linux/process_map.h" +#include "process_map.h" + + +namespace whale { + +std::unique_ptr FindFileMemoryRange(const char *name) { + std::unique_ptr range(new MemoryRange); + range->base_ = UINTPTR_MAX; + ForeachMemoryRange( + [&](uintptr_t begin, uintptr_t end, char *perm, char *mapname) -> bool { + if (strstr(mapname, name)) { + if (range->path_ == nullptr) { + range->path_ = strdup(mapname); + } + if (range->base_ > begin) { + range->base_ = begin; + } + if (range->end_ < end) { + range->end_ = end; + } + } + return true; + }); + return range; +} + +std::unique_ptr FindExecuteMemoryRange(const char *name) { + std::unique_ptr range(new MemoryRange); + ForeachMemoryRange( + [&](uintptr_t begin, uintptr_t end, char *perm, char *mapname) -> bool { + if (strncmp(mapname, "/system/fake-libs/", 18) == 0) { + return true; + } + if (strstr(mapname, name) && strstr(perm, "x") && strstr(perm, "r")) { + range->path_ = strdup(mapname); + range->base_ = begin; + range->end_ = end; + return false; + } + return true; + }); + return range; +} + +void +ForeachMemoryRange(std::function callback) { + FILE *f; + if ((f = fopen("/proc/self/maps", "r"))) { + char buf[PATH_MAX], perm[12] = {'\0'}, dev[12] = {'\0'}, mapname[PATH_MAX] = {'\0'}; + uintptr_t begin, end, inode, foo; + + while (!feof(f)) { + if (fgets(buf, sizeof(buf), f) == 0) + break; + sscanf(buf, "%lx-%lx %s %lx %s %ld %s", &begin, &end, perm, + &foo, dev, &inode, mapname); + if (!callback(begin, end, perm, mapname)) { + break; + } + } + fclose(f); + } +} + +bool IsFileInMemory(const char *name) { + bool found = false; + ForeachMemoryRange( + [&](uintptr_t begin, uintptr_t end, char *perm, char *mapname) -> bool { + if (strstr(mapname, name)) { + found = true; + return false; + } + return true; + }); + return found; +} + +} // namespace whale diff --git a/module/src/main/cpp/whale/src/platform/linux/process_map.h b/module/src/main/cpp/whale/src/platform/linux/process_map.h new file 
mode 100644 index 00000000..119963f1 --- /dev/null +++ b/module/src/main/cpp/whale/src/platform/linux/process_map.h @@ -0,0 +1,44 @@ +#ifndef WHALE_PLATFORM_PROCESS_MAP_H_ +#define WHALE_PLATFORM_PROCESS_MAP_H_ + +#include +#include +#include "base/primitive_types.h" + +namespace whale { + +struct MemoryRange { + public: + MemoryRange() : base_(0), end_(0), path_(nullptr) {} + + ~MemoryRange() { + if (path_ != nullptr) { + free((void *) path_); + path_ = nullptr; + } + } + + bool IsValid() { + return path_ != nullptr && base_ < end_; + } + + bool IsInRange(uintptr_t address) { + return IsValid() && base_ <= address && end_ >= address; + } + + const char *path_; + uintptr_t base_; + uintptr_t end_; +}; + +bool IsFileInMemory(const char *name); + +std::unique_ptr FindExecuteMemoryRange(const char *name); + +std::unique_ptr FindFileMemoryRange(const char *name); + +void ForeachMemoryRange(std::function callback); + +} // namespace whale + +#endif // WHALE_PLATFORM_PROCESS_MAP_H_ diff --git a/module/src/main/cpp/whale/src/platform/memory.cc b/module/src/main/cpp/whale/src/platform/memory.cc new file mode 100644 index 00000000..2013fd04 --- /dev/null +++ b/module/src/main/cpp/whale/src/platform/memory.cc @@ -0,0 +1,80 @@ +#include +#include "platform/memory.h" +#include "base/align.h" +#include "base/cxx_helper.h" +#include "base/logging.h" +#include "base/macros.h" + +#ifdef __APPLE__ + +#include +#include +#include +#include + +C_API kern_return_t mach_vm_remap(vm_map_t, mach_vm_address_t *, mach_vm_size_t, + mach_vm_offset_t, int, vm_map_t, mach_vm_address_t, + boolean_t, vm_prot_t *, vm_prot_t *, vm_inherit_t); + +#endif + +namespace whale { + +ScopedMemoryPatch::ScopedMemoryPatch(void *address, void *patch, size_t size) : address_(address), patch_(patch), + size_(size) { + CHECK(address != nullptr && size > 0); + intptr_t page_start = PageStart(reinterpret_cast(address)); + size_t page_offset = static_cast(reinterpret_cast(address) - page_start); + intptr_t 
page_end = PageAlign(reinterpret_cast(address) + size); + size_t page_size = static_cast(page_end - page_start); + bool use_rwx_page = + mprotect(reinterpret_cast(page_start), page_size, PROT_READ | PROT_WRITE | PROT_EXEC) == 0; + if (use_rwx_page) { + memcpy(address, patch, size); + } else { +#ifdef __APPLE__ + // + // Only rw- and r-x page permissions are available on IOS. + // + void *remap_page = mmap(nullptr, GetPageSize(), PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + mach_port_t task_self = mach_task_self(); + vm_address_t vm_page_start = static_cast(page_start); + vm_size_t region_size = 0; + vm_region_submap_short_info_64 info; + mach_msg_type_number_t info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; + natural_t max_depth = UINT8_MAX; + kern_return_t kr = vm_region_recurse_64(task_self, + &vm_page_start, ®ion_size, &max_depth, + (vm_region_recurse_info_t) &info, + &info_count); + if (kr != KERN_SUCCESS) { + return; + } + vm_copy(task_self, vm_page_start, page_size, (vm_address_t) remap_page); + memcpy(OffsetOf(remap_page, page_offset), patch, size); + + mprotect(remap_page, page_size, PROT_READ | PROT_EXEC); + + vm_prot_t cur_protection, max_protection; + mach_vm_address_t mach_vm_page_start = static_cast(page_start); + mach_vm_remap(task_self, &mach_vm_page_start, page_size, 0, VM_FLAGS_OVERWRITE, + task_self, (mach_vm_address_t) remap_page, TRUE, &cur_protection, &max_protection, + info.inheritance); +#endif + } +} + +ScopedMemoryPatch::~ScopedMemoryPatch() { +#ifdef __APPLE__ + sys_icache_invalidate(reinterpret_cast(address_), size_); +#else + __builtin___clear_cache( + reinterpret_cast(address_), + reinterpret_cast(reinterpret_cast(address_) + size_) + ); +#endif +} + + +} // namespace whale diff --git a/module/src/main/cpp/whale/src/platform/memory.h b/module/src/main/cpp/whale/src/platform/memory.h new file mode 100644 index 00000000..66f2c0ea --- /dev/null +++ b/module/src/main/cpp/whale/src/platform/memory.h @@ -0,0 +1,22 @@ 
+#ifndef PLATFORM_SCOPED_MEMORY_ACCESS_H_ +#define PLATFORM_SCOPED_MEMORY_ACCESS_H_ + +#include + +namespace whale { + +class ScopedMemoryPatch { +public: + ScopedMemoryPatch(void *address, void *patch, size_t size); + + ~ScopedMemoryPatch(); + +private: + void *address_; + void *patch_; + size_t size_; +}; + +} // namespace whale + +#endif // PLATFORM_SCOPED_MEMORY_ACCESS_H_ diff --git a/module/src/main/cpp/whale/src/simulator/code_simulator.cc b/module/src/main/cpp/whale/src/simulator/code_simulator.cc new file mode 100644 index 00000000..ee74d0d7 --- /dev/null +++ b/module/src/main/cpp/whale/src/simulator/code_simulator.cc @@ -0,0 +1,20 @@ +#include "simulator/code_simulator.h" +#include "simulator/code_simulator_arm64.h" + +namespace whale { + +CodeSimulator *CodeSimulator::CreateCodeSimulator(InstructionSet target_isa) { + switch (target_isa) { + case InstructionSet::kArm64: + return arm64::CodeSimulatorArm64::CreateCodeSimulatorArm64(); + default: + return nullptr; + } +} + +CodeSimulator *CreateCodeSimulator(InstructionSet target_isa) { + return CodeSimulator::CreateCodeSimulator(target_isa); +} + +} // namespace whale + diff --git a/module/src/main/cpp/whale/src/simulator/code_simulator.h b/module/src/main/cpp/whale/src/simulator/code_simulator.h new file mode 100644 index 00000000..c08706fc --- /dev/null +++ b/module/src/main/cpp/whale/src/simulator/code_simulator.h @@ -0,0 +1,35 @@ +#ifndef WHALE_SIMULATOR_CODE_SIMULATOR_H_ +#define WHALE_SIMULATOR_CODE_SIMULATOR_H_ + +#include +#include "dbi/instruction_set.h" +#include "base/primitive_types.h" + +namespace whale { + +class CodeSimulator { + public: + CodeSimulator() = default; + + virtual ~CodeSimulator() = default; + + // Returns a null pointer if a simulator cannot be found for target_isa. + static CodeSimulator *CreateCodeSimulator(InstructionSet target_isa); + + virtual void RunFrom(intptr_t code_buffer) = 0; + + // Get return value according to C ABI. 
+ virtual bool GetCReturnBool() const = 0; + + virtual s4 GetCReturnInt32() const = 0; + + virtual s8 GetCReturnInt64() const = 0; + + private: + DISALLOW_COPY_AND_ASSIGN(CodeSimulator); +}; + +} // namespace whale + +#endif // WHALE_SIMULATOR_CODE_SIMULATOR_H_ + diff --git a/module/src/main/cpp/whale/src/simulator/code_simulator_arm64.cc b/module/src/main/cpp/whale/src/simulator/code_simulator_arm64.cc new file mode 100644 index 00000000..d9107f8f --- /dev/null +++ b/module/src/main/cpp/whale/src/simulator/code_simulator_arm64.cc @@ -0,0 +1,54 @@ +#include "vixl/aarch64/instructions-aarch64.h" +#include "simulator/code_simulator_arm64.h" +#include "base/logging.h" + +namespace whale { +namespace arm64 { + +using namespace vixl::aarch64; // NOLINT(build/namespaces) + +CodeSimulatorArm64* CodeSimulatorArm64::CreateCodeSimulatorArm64() { + if (kCanSimulate) { + return new CodeSimulatorArm64(); + } else { + return nullptr; + } +} + +CodeSimulatorArm64::CodeSimulatorArm64() + : CodeSimulator(), decoder_(nullptr), simulator_(nullptr) { + CHECK(kCanSimulate); + decoder_ = new Decoder(); + simulator_ = new Simulator(decoder_); +} + +CodeSimulatorArm64::~CodeSimulatorArm64() { + CHECK(kCanSimulate); + delete simulator_; + delete decoder_; +} + +void CodeSimulatorArm64::RunFrom(intptr_t code_buffer) { + CHECK(kCanSimulate); + simulator_->RunFrom(reinterpret_cast(code_buffer)); +} + +bool CodeSimulatorArm64::GetCReturnBool() const { + CHECK(kCanSimulate); + return static_cast(simulator_->ReadWRegister(0)); +} + +int32_t CodeSimulatorArm64::GetCReturnInt32() const { + CHECK(kCanSimulate); + return simulator_->ReadWRegister(0); +} + +int64_t CodeSimulatorArm64::GetCReturnInt64() const { + CHECK(kCanSimulate); + return simulator_->ReadXRegister(0); +} + + +} // namespace arm64 +} // namespace whale + diff --git a/module/src/main/cpp/whale/src/simulator/code_simulator_arm64.h b/module/src/main/cpp/whale/src/simulator/code_simulator_arm64.h new file mode 100644 index 
00000000..abdd213e --- /dev/null +++ b/module/src/main/cpp/whale/src/simulator/code_simulator_arm64.h @@ -0,0 +1,43 @@ +#ifndef WHALE_SIMULATOR_CODE_SIMULATOR_ARM64_H_ +#define WHALE_SIMULATOR_CODE_SIMULATOR_ARM64_H_ + +#include +#include "dbi/instruction_set.h" +#include "base/primitive_types.h" +#include "vixl/aarch64/decoder-aarch64.h" +#include "vixl/aarch64/simulator-aarch64.h" +#include "simulator/code_simulator.h" + +namespace whale { +namespace arm64 { + +class CodeSimulatorArm64 : public CodeSimulator { + public: + static CodeSimulatorArm64 *CreateCodeSimulatorArm64(); + + ~CodeSimulatorArm64() override; + + void RunFrom(intptr_t code_buffer) override; + + bool GetCReturnBool() const override; + + int32_t GetCReturnInt32() const override; + + int64_t GetCReturnInt64() const override; + + private: + CodeSimulatorArm64(); + + vixl::aarch64::Decoder *decoder_; + vixl::aarch64::Simulator *simulator_; + + static constexpr bool kCanSimulate = (kRuntimeISA != InstructionSet::kArm64); + + DISALLOW_COPY_AND_ASSIGN(CodeSimulatorArm64); +}; + +} +} // namespace whale + +#endif // WHALE_SIMULATOR_CODE_SIMULATOR_ARM64_H_ + diff --git a/module/src/main/cpp/whale/src/simulator/code_simulator_container.cc b/module/src/main/cpp/whale/src/simulator/code_simulator_container.cc new file mode 100644 index 00000000..e417145f --- /dev/null +++ b/module/src/main/cpp/whale/src/simulator/code_simulator_container.cc @@ -0,0 +1,17 @@ +#include "simulator/code_simulator_container.h" +#include "simulator/code_simulator.h" + +namespace whale { + + +CodeSimulatorContainer::CodeSimulatorContainer(InstructionSet target_isa) : simulator_(nullptr) { + simulator_ = CodeSimulator::CreateCodeSimulator(target_isa); +} + + +CodeSimulatorContainer::~CodeSimulatorContainer() { + delete simulator_; +} + + +} \ No newline at end of file diff --git a/module/src/main/cpp/whale/src/simulator/code_simulator_container.h b/module/src/main/cpp/whale/src/simulator/code_simulator_container.h new file mode 
100644 index 00000000..fa15213d --- /dev/null +++ b/module/src/main/cpp/whale/src/simulator/code_simulator_container.h @@ -0,0 +1,43 @@ +#ifndef WHALE_SIMULATOR_CODE_SIMULATOR_CONTAINER_H_ +#define WHALE_SIMULATOR_CODE_SIMULATOR_CONTAINER_H_ + +#include +#include "simulator/code_simulator.h" +#include "base/logging.h" + +namespace whale { + + +class CodeSimulatorContainer { + public: + explicit CodeSimulatorContainer(InstructionSet target_isa); + + ~CodeSimulatorContainer(); + + bool CanSimulate() const { + return simulator_ != nullptr; + } + + CodeSimulator *Get() { + DCHECK(CanSimulate()); + return simulator_; + } + + const CodeSimulator *Get() const { + DCHECK(CanSimulate()); + return simulator_; + } + + private: + CodeSimulator *simulator_; + + DISALLOW_COPY_AND_ASSIGN(CodeSimulatorContainer); +}; + + +} // namespace whale + + +#endif // WHALE_SIMULATOR_CODE_SIMULATOR_CONTAINER_H_ + + diff --git a/module/src/main/cpp/whale/src/whale.cc b/module/src/main/cpp/whale/src/whale.cc new file mode 100644 index 00000000..36a781d0 --- /dev/null +++ b/module/src/main/cpp/whale/src/whale.cc @@ -0,0 +1,140 @@ +#include +#include "whale.h" +#include "interceptor.h" +#include "dbi/instruction_set.h" + +#if defined(__arm__) +#include "dbi/arm/inline_hook_arm.h" +#elif defined(__aarch64__) + +#include "dbi/arm64/inline_hook_arm64.h" + +#elif defined(__i386__) +#include "dbi/x86/inline_hook_x86.h" +#elif defined(__x86_64__) + +#include "dbi/x86_64/inline_hook_x86_64.h" + +#endif + +#if defined(__APPLE__) + +#include "dbi/darwin/macho_import_hook.h" + +#endif + +#if defined(linux) + +#include "platform/linux/elf_image.h" +#include "platform/linux/process_map.h" + +#endif + + +OPEN_API void WInlineHookFunction(void *address, void *replace, void **backup) { +#if defined(__arm__) + std::unique_ptr hook( + new whale::arm::ArmInlineHook( + reinterpret_cast(address), + reinterpret_cast(replace), + reinterpret_cast(backup) + ) + ); + whale::Interceptor::Instance()->AddHook(hook); +#elif 
defined(__aarch64__) + std::unique_ptr<whale::Hook> hook( + new whale::arm64::Arm64InlineHook( + reinterpret_cast<intptr_t>(address), + reinterpret_cast<intptr_t>(replace), + reinterpret_cast<intptr_t *>(backup) + ) + ); + whale::Interceptor::Instance()->AddHook(hook); +#elif defined(__i386__) + std::unique_ptr<whale::Hook> hook( + new whale::x86::X86InlineHook( + reinterpret_cast<intptr_t>(address), + reinterpret_cast<intptr_t>(replace), + reinterpret_cast<intptr_t *>(backup) + ) + ); + whale::Interceptor::Instance()->AddHook(hook); +#elif defined(__x86_64__) + std::unique_ptr<whale::Hook> hook( + new whale::x86_64::X86_64InlineHook( + reinterpret_cast<intptr_t>(address), + reinterpret_cast<intptr_t>(replace), + reinterpret_cast<intptr_t *>(backup) + ) + ); + whale::Interceptor::Instance()->AddHook(hook); +#else + LOG(WARNING) << "Unsupported ISA to Hook Function: " << whale::kRuntimeISA; +#endif +} + +OPEN_API void WImportHookFunction(const char *name, const char *libname, void *replace, void **backup) { +#if defined(__APPLE__) + std::unique_ptr<whale::Hook> hook(new whale::darwin::MachoImportHook( + name, + replace, + backup + )); + whale::Interceptor::Instance()->AddHook(hook); +#endif +} + +OPEN_API void *WDynamicLibOpen(const char *name) { +#ifdef linux + auto range = whale::FindExecuteMemoryRange(name); + if (!range->IsValid()) { + return nullptr; + } + whale::ElfImage *image = new whale::ElfImage(); + if (!image->Open(range->path_, range->base_)) { + delete image; + return nullptr; + } + return reinterpret_cast<void *>(image); +#else + return dlopen(name, RTLD_NOW); +#endif +} + +OPEN_API void *WDynamicLibOpenAlias(const char *name, const char *path) { +#ifdef linux + auto range = whale::FindExecuteMemoryRange(name); + if (!range->IsValid()) { + return nullptr; + } + whale::ElfImage *image = new whale::ElfImage(); + if (!image->Open(path, range->base_)) { + delete image; + return nullptr; + } + return reinterpret_cast<void *>(image); +#else + return dlopen(name, RTLD_NOW); +#endif +} + +OPEN_API void *WDynamicLibSymbol(void *handle, const char *name) { + if (handle == nullptr || name == nullptr) { + return nullptr; +
} +#ifdef linux + whale::ElfImage *image = reinterpret_cast<whale::ElfImage *>(handle); + return image->FindSymbol(name); +#else + return dlsym(handle, name); +#endif +} + +OPEN_API void WDynamicLibClose(void *handle) { +#ifdef linux + whale::ElfImage *image = reinterpret_cast<whale::ElfImage *>(handle); + delete image; +#else + dlclose(handle); +#endif +} diff --git a/settings.gradle b/settings.gradle new file mode 100644 index 00000000..1d0cab1b --- /dev/null +++ b/settings.gradle @@ -0,0 +1 @@ +include ':module' diff --git a/template/magisk_module/.gitattributes b/template/magisk_module/.gitattributes new file mode 100644 index 00000000..11e33e9b --- /dev/null +++ b/template/magisk_module/.gitattributes @@ -0,0 +1,10 @@ +# Declare files that will always have LF line endings on checkout. +META-INF/** text eol=lf +*.prop text eol=lf +*.sh text eol=lf +*.md text eol=lf +sepolicy.rule text eol=lf + +# Denote all files that are truly binary and should not be modified. +system/** binary +system_x86/** binary \ No newline at end of file diff --git a/template/magisk_module/META-INF/com/google/android/update-binary b/template/magisk_module/META-INF/com/google/android/update-binary new file mode 100644 index 00000000..d19eeb55 --- /dev/null +++ b/template/magisk_module/META-INF/com/google/android/update-binary @@ -0,0 +1,173 @@ +#!/sbin/sh + +################# +# Initialization +################# + +umask 022 + +# Global vars +TMPDIR=/dev/tmp +PERSISTDIR=/sbin/.magisk/mirror/persist + +rm -rf $TMPDIR 2>/dev/null +mkdir -p $TMPDIR + +# echo before loading util_functions +ui_print() { echo "$1"; } + +require_new_magisk() { + ui_print "*******************************" + ui_print " Please install Magisk v19.0+! " + ui_print "*******************************" + exit 1 +} + +is_legacy_script() { + unzip -l "$ZIPFILE" install.sh | grep -q install.sh + return $?
+} + +print_modname() { + local len + len=`echo -n $MODNAME | wc -c` + len=$((len + 2)) + local pounds=`printf "%${len}s" | tr ' ' '*'` + ui_print "$pounds" + ui_print " $MODNAME " + ui_print "$pounds" + ui_print "*******************" + ui_print " Powered by Magisk " + ui_print "*******************" +} + +############## +# Environment +############## + +OUTFD=$2 +ZIPFILE=$3 + +mount /data 2>/dev/null + +# Load utility functions +[ -f /data/adb/magisk/util_functions.sh ] || require_new_magisk +. /data/adb/magisk/util_functions.sh +[ $MAGISK_VER_CODE -gt 18100 ] || require_new_magisk + +# Preparation for flashable zips +setup_flashable + +# Mount partitions +mount_partitions + +# Detect version and architecture +api_level_arch_detect + +# Setup busybox and binaries +$BOOTMODE && boot_actions || recovery_actions + +############## +# Preparation +############## + +# Extract prop file +unzip -o "$ZIPFILE" module.prop -d $TMPDIR >&2 +[ ! -f $TMPDIR/module.prop ] && abort "! Unable to extract zip file!" + +$BOOTMODE && MODDIRNAME=modules_update || MODDIRNAME=modules +MODULEROOT=$NVBASE/$MODDIRNAME +MODID=`grep_prop id $TMPDIR/module.prop` +MODPATH=$MODULEROOT/$MODID +MODNAME=`grep_prop name $TMPDIR/module.prop` + +# Create mod paths +rm -rf $MODPATH 2>/dev/null +mkdir -p $MODPATH + +########## +# Install +########## + +if is_legacy_script; then + unzip -oj "$ZIPFILE" module.prop install.sh uninstall.sh 'common/*' -d $TMPDIR >&2 + + # Load install script + .
$TMPDIR/install.sh + + # Callbacks + print_modname + on_install + + # Custom uninstaller + [ -f $TMPDIR/uninstall.sh ] && cp -af $TMPDIR/uninstall.sh $MODPATH/uninstall.sh + + # Skip mount + $SKIPMOUNT && touch $MODPATH/skip_mount + + # prop file + $PROPFILE && cp -af $TMPDIR/system.prop $MODPATH/system.prop + + # Module info + cp -af $TMPDIR/module.prop $MODPATH/module.prop + + # post-fs-data scripts + $POSTFSDATA && cp -af $TMPDIR/post-fs-data.sh $MODPATH/post-fs-data.sh + + # service scripts + $LATESTARTSERVICE && cp -af $TMPDIR/service.sh $MODPATH/service.sh + + ui_print "- Setting permissions" + set_permissions +else + print_modname + + unzip -o "$ZIPFILE" customize.sh -d $MODPATH >&2 + + if ! grep -q '^SKIPUNZIP=1$' $MODPATH/customize.sh 2>/dev/null; then + ui_print "- Extracting module files" + unzip -o "$ZIPFILE" -x 'META-INF/*' -d $MODPATH >&2 + + # Default permissions + set_perm_recursive $MODPATH 0 0 0755 0644 + fi + + # Load customization script + [ -f $MODPATH/customize.sh ] && . 
$MODPATH/customize.sh +fi + +# Handle replace folders +for TARGET in $REPLACE; do + ui_print "- Replace target: $TARGET" + mktouch $MODPATH$TARGET/.replace +done + +if $BOOTMODE; then + # Update info for Magisk Manager + mktouch $NVBASE/modules/$MODID/update + cp -af $MODPATH/module.prop $NVBASE/modules/$MODID/module.prop +fi + +# Copy over custom sepolicy rules +if [ -f $MODPATH/sepolicy.rule -a -e $PERSISTDIR ]; then + ui_print "- Installing custom sepolicy patch" + PERSISTMOD=$PERSISTDIR/magisk/$MODID + mkdir -p $PERSISTMOD + cp -af $MODPATH/sepolicy.rule $PERSISTMOD/sepolicy.rule +fi + +# Remove stuffs that don't belong to modules +rm -rf \ +$MODPATH/system/placeholder $MODPATH/customize.sh \ +$MODPATH/README.md $MODPATH/.git* 2>/dev/null + +############## +# Finalizing +############## + +cd / +$BOOTMODE || recovery_cleanup +rm -rf $TMPDIR + +ui_print "- Done" +exit 0 \ No newline at end of file diff --git a/template/magisk_module/META-INF/com/google/android/updater-script b/template/magisk_module/META-INF/com/google/android/updater-script new file mode 100644 index 00000000..11d5c96e --- /dev/null +++ b/template/magisk_module/META-INF/com/google/android/updater-script @@ -0,0 +1 @@ +#MAGISK diff --git a/template/magisk_module/README.md b/template/magisk_module/README.md new file mode 100644 index 00000000..c316e8b9 --- /dev/null +++ b/template/magisk_module/README.md @@ -0,0 +1 @@ +# Riru - Template \ No newline at end of file diff --git a/template/magisk_module/customize.sh b/template/magisk_module/customize.sh new file mode 100644 index 00000000..5ecaad75 --- /dev/null +++ b/template/magisk_module/customize.sh @@ -0,0 +1,65 @@ +SKIPUNZIP=1 + +# extract verify.sh +ui_print "- Extracting verify.sh" +unzip -o "$ZIPFILE" 'verify.sh' -d "$TMPDIR" >&2 +if [ ! -f "$TMPDIR/verify.sh" ]; then + ui_print "*********************************************************" + ui_print "! Unable to extract verify.sh!" + ui_print "! 
This zip may be corrupted, please try downloading again" + abort "*********************************************************" +fi +. $TMPDIR/verify.sh + +# extract riru.sh +extract "$ZIPFILE" 'riru.sh' "$MODPATH" +. $MODPATH/riru.sh + +check_riru_version +check_architecture + +# extract libs +ui_print "- Extracting module files" + +extract "$ZIPFILE" 'module.prop' "$MODPATH" +extract "$ZIPFILE" 'post-fs-data.sh' "$MODPATH" +extract "$ZIPFILE" 'uninstall.sh' "$MODPATH" +#extract "$ZIPFILE" 'sepolicy.rule' "$MODPATH" + +if [ "$ARCH" = "x86" ] || [ "$ARCH" = "x64" ]; then + ui_print "- Extracting x86 libraries" + extract "$ZIPFILE" "system_x86/lib/libriru_$RIRU_MODULE_ID.so" "$MODPATH" + mv "$MODPATH/system_x86/lib" "$MODPATH/system/lib" + + if [ "$IS64BIT" = true ]; then + ui_print "- Extracting x64 libraries" + extract "$ZIPFILE" "system_x86/lib64/libriru_$RIRU_MODULE_ID.so" "$MODPATH" + mv "$MODPATH/system_x86/lib64" "$MODPATH/system/lib64" + fi +else + ui_print "- Extracting arm libraries" + extract "$ZIPFILE" "system/lib/libriru_$RIRU_MODULE_ID.so" "$MODPATH" + + if [ "$IS64BIT" = true ]; then + ui_print "- Extracting arm64 libraries" + extract "$ZIPFILE" "system/lib64/libriru_$RIRU_MODULE_ID.so" "$MODPATH" + fi +fi + +# Riru files +ui_print "- Extracting extra files" +[ -d "$RIRU_MODULE_PATH" ] || mkdir -p "$RIRU_MODULE_PATH" || abort "! 
Can't create $RIRU_MODULE_PATH" + +# set permission just in case +set_perm "$RIRU_PATH" 0 0 0700 +set_perm "$RIRU_PATH/modules" 0 0 0700 +set_perm "$RIRU_MODULE_PATH" 0 0 0700 +set_perm "$RIRU_MODULE_PATH/bin" 0 0 0700 + +rm -f "$RIRU_MODULE_PATH/module.prop.new" +extract "$ZIPFILE" 'riru/module.prop.new' "$RIRU_MODULE_PATH" true +set_perm "$RIRU_MODULE_PATH/module.prop.new" 0 0 0600 + +# set permissions +ui_print "- Setting permissions" +set_perm_recursive "$MODPATH" 0 0 0755 0644 diff --git a/template/magisk_module/post-fs-data.sh b/template/magisk_module/post-fs-data.sh new file mode 100644 index 00000000..076c4209 --- /dev/null +++ b/template/magisk_module/post-fs-data.sh @@ -0,0 +1,13 @@ +#!/system/bin/sh +MODDIR=${0%/*} +[ ! -f "$MODDIR/riru.sh" ] && exit 1 +. $MODDIR/riru.sh + +# Reset context just in case +chcon -R u:object_r:system_file:s0 "$MODDIR" + +# Rename module.prop.new +if [ -f "$RIRU_MODULE_PATH/module.prop.new" ]; then + rm "$RIRU_MODULE_PATH/module.prop" + mv "$RIRU_MODULE_PATH/module.prop.new" "$RIRU_MODULE_PATH/module.prop" +fi \ No newline at end of file diff --git a/template/magisk_module/riru.sh b/template/magisk_module/riru.sh new file mode 100644 index 00000000..3a5ae7c2 --- /dev/null +++ b/template/magisk_module/riru.sh @@ -0,0 +1,32 @@ +#!/sbin/sh +RIRU_PATH="/data/misc/riru" +RIRU_MODULE_ID="%%%RIRU_MODULE_ID%%%" +RIRU_MODULE_PATH="$RIRU_PATH/modules/$RIRU_MODULE_ID" +RIRU_MIN_API_VERSION=%%%RIRU_MIN_API_VERSION%%% +RIRU_MIN_VERSION_NAME="%%%RIRU_MIN_VERSION_NAME%%%" + +check_riru_version() { + if [ ! -f "$RIRU_PATH/api_version" ] && [ ! -f "$RIRU_PATH/api_version.new" ]; then + ui_print "*********************************************************" + ui_print "! Riru is not installed" + ui_print "!
Please install Riru from Magisk Manager or https://github.com/RikkaApps/Riru/releases" + abort "*********************************************************" + fi + RIRU_API_VERSION=$(cat "$RIRU_PATH/api_version.new") || RIRU_API_VERSION=$(cat "$RIRU_PATH/api_version") || RIRU_API_VERSION=0 + [ "$RIRU_API_VERSION" -eq "$RIRU_API_VERSION" ] || RIRU_API_VERSION=0 + ui_print "- Riru API version: $RIRU_API_VERSION" + if [ "$RIRU_API_VERSION" -lt $RIRU_MIN_API_VERSION ]; then + ui_print "*********************************************************" + ui_print "! Riru $RIRU_MIN_VERSION_NAME or above is required" + ui_print "! Please upgrade Riru from Magisk Manager or https://github.com/RikkaApps/Riru/releases" + abort "*********************************************************" + fi +} + +check_architecture() { + if [ "$ARCH" != "arm" ] && [ "$ARCH" != "arm64" ] && [ "$ARCH" != "x86" ] && [ "$ARCH" != "x64" ]; then + abort "! Unsupported platform: $ARCH" + else + ui_print "- Device platform: $ARCH" + fi +} \ No newline at end of file diff --git a/template/magisk_module/uninstall.sh b/template/magisk_module/uninstall.sh new file mode 100644 index 00000000..3c83e2be --- /dev/null +++ b/template/magisk_module/uninstall.sh @@ -0,0 +1,6 @@ +#!/sbin/sh +MODDIR=${0%/*} +[ ! -f "$MODDIR/riru.sh" ] && exit 1 +. $MODDIR/riru.sh + +rm -rf "$RIRU_MODULE_PATH" \ No newline at end of file diff --git a/template/magisk_module/verify.sh b/template/magisk_module/verify.sh new file mode 100644 index 00000000..b04a9ea3 --- /dev/null +++ b/template/magisk_module/verify.sh @@ -0,0 +1,39 @@ +TMPDIR_FOR_VERIFY="$TMPDIR/.vunzip" +mkdir "$TMPDIR_FOR_VERIFY" + +abort_verify() { + ui_print "*********************************************************" + ui_print "! $1" + ui_print "! 
This zip may be corrupted, please try downloading again" + abort "*********************************************************" +} + +# extract +extract() { + zip=$1 + file=$2 + dir=$3 + junk_paths=$4 + [ -z "$junk_paths" ] && junk_paths=false + opts="-o" + [ $junk_paths = true ] && opts="-oj" + + file_path="" + hash_path="" + if [ $junk_paths = true ]; then + file_path="$dir/$(basename "$file")" + hash_path="$TMPDIR_FOR_VERIFY/$(basename "$file").sha256sum" + else + file_path="$dir/$file" + hash_path="$TMPDIR_FOR_VERIFY/$file.sha256sum" + fi + + unzip $opts "$zip" "$file" -d "$dir" >&2 + [ -f "$file_path" ] || abort_verify "$file not exists" + + unzip $opts "$zip" "$file.sha256sum" -d "$TMPDIR_FOR_VERIFY" >&2 + [ -f "$hash_path" ] || abort_verify "$file.sha256sum not exists" + + (echo "$(cat "$hash_path") $file_path" | sha256sum -c -s -) || abort_verify "Failed to verify $file" + ui_print "- Verified $file" >&1 +} \ No newline at end of file